mirror of
https://github.com/github/codeql.git
synced 2026-05-01 11:45:14 +02:00
Update golang.org/x/tools dependency
This commit is contained in:
2
go.mod
2
go.mod
@@ -2,4 +2,4 @@ module github.com/Semmle/go
|
||||
|
||||
go 1.13
|
||||
|
||||
require golang.org/x/tools v0.0.0-20191030225452-7871c2d76733
|
||||
require golang.org/x/tools v0.0.0-20200109174759-ac4f524c1612
|
||||
|
||||
11
go.sum
11
go.sum
@@ -1,9 +1,12 @@
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20191030225452-7871c2d76733 h1:wtYExk7epHk5WDdLiCO92FIXY5eiMtZqV1RMSLiR/3M=
|
||||
golang.org/x/tools v0.0.0-20191030225452-7871c2d76733/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/tools v0.0.0-20200109174759-ac4f524c1612 h1:wRxHHuBMuDzijfZQMAgmVpDDTra91XF84qmoVTyj+U0=
|
||||
golang.org/x/tools v0.0.0-20200109174759-ac4f524c1612/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
131
vendor/golang.org/x/tools/benchmark/parse/parse.go
generated
vendored
Normal file
131
vendor/golang.org/x/tools/benchmark/parse/parse.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package parse provides support for parsing benchmark results as
|
||||
// generated by 'go test -bench'.
|
||||
package parse // import "golang.org/x/tools/benchmark/parse"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Flags used by Benchmark.Measured to indicate
|
||||
// which measurements a Benchmark contains.
|
||||
const (
|
||||
NsPerOp = 1 << iota
|
||||
MBPerS
|
||||
AllocedBytesPerOp
|
||||
AllocsPerOp
|
||||
)
|
||||
|
||||
// Benchmark is one run of a single benchmark.
|
||||
type Benchmark struct {
|
||||
Name string // benchmark name
|
||||
N int // number of iterations
|
||||
NsPerOp float64 // nanoseconds per iteration
|
||||
AllocedBytesPerOp uint64 // bytes allocated per iteration
|
||||
AllocsPerOp uint64 // allocs per iteration
|
||||
MBPerS float64 // MB processed per second
|
||||
Measured int // which measurements were recorded
|
||||
Ord int // ordinal position within a benchmark run
|
||||
}
|
||||
|
||||
// ParseLine extracts a Benchmark from a single line of testing.B
|
||||
// output.
|
||||
func ParseLine(line string) (*Benchmark, error) {
|
||||
fields := strings.Fields(line)
|
||||
|
||||
// Two required, positional fields: Name and iterations.
|
||||
if len(fields) < 2 {
|
||||
return nil, fmt.Errorf("two fields required, have %d", len(fields))
|
||||
}
|
||||
if !strings.HasPrefix(fields[0], "Benchmark") {
|
||||
return nil, fmt.Errorf(`first field does not start with "Benchmark"`)
|
||||
}
|
||||
n, err := strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b := &Benchmark{Name: fields[0], N: n}
|
||||
|
||||
// Parse any remaining pairs of fields; we've parsed one pair already.
|
||||
for i := 1; i < len(fields)/2; i++ {
|
||||
b.parseMeasurement(fields[i*2], fields[i*2+1])
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (b *Benchmark) parseMeasurement(quant string, unit string) {
|
||||
switch unit {
|
||||
case "ns/op":
|
||||
if f, err := strconv.ParseFloat(quant, 64); err == nil {
|
||||
b.NsPerOp = f
|
||||
b.Measured |= NsPerOp
|
||||
}
|
||||
case "MB/s":
|
||||
if f, err := strconv.ParseFloat(quant, 64); err == nil {
|
||||
b.MBPerS = f
|
||||
b.Measured |= MBPerS
|
||||
}
|
||||
case "B/op":
|
||||
if i, err := strconv.ParseUint(quant, 10, 64); err == nil {
|
||||
b.AllocedBytesPerOp = i
|
||||
b.Measured |= AllocedBytesPerOp
|
||||
}
|
||||
case "allocs/op":
|
||||
if i, err := strconv.ParseUint(quant, 10, 64); err == nil {
|
||||
b.AllocsPerOp = i
|
||||
b.Measured |= AllocsPerOp
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Benchmark) String() string {
|
||||
buf := new(bytes.Buffer)
|
||||
fmt.Fprintf(buf, "%s %d", b.Name, b.N)
|
||||
if (b.Measured & NsPerOp) != 0 {
|
||||
fmt.Fprintf(buf, " %.2f ns/op", b.NsPerOp)
|
||||
}
|
||||
if (b.Measured & MBPerS) != 0 {
|
||||
fmt.Fprintf(buf, " %.2f MB/s", b.MBPerS)
|
||||
}
|
||||
if (b.Measured & AllocedBytesPerOp) != 0 {
|
||||
fmt.Fprintf(buf, " %d B/op", b.AllocedBytesPerOp)
|
||||
}
|
||||
if (b.Measured & AllocsPerOp) != 0 {
|
||||
fmt.Fprintf(buf, " %d allocs/op", b.AllocsPerOp)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Set is a collection of benchmarks from one
|
||||
// testing.B run, keyed by name to facilitate comparison.
|
||||
type Set map[string][]*Benchmark
|
||||
|
||||
// ParseSet extracts a Set from testing.B output.
|
||||
// ParseSet preserves the order of benchmarks that have identical
|
||||
// names.
|
||||
func ParseSet(r io.Reader) (Set, error) {
|
||||
bb := make(Set)
|
||||
scan := bufio.NewScanner(r)
|
||||
ord := 0
|
||||
for scan.Scan() {
|
||||
if b, err := ParseLine(scan.Text()); err == nil {
|
||||
b.Ord = ord
|
||||
ord++
|
||||
bb[b.Name] = append(bb[b.Name], b)
|
||||
}
|
||||
}
|
||||
|
||||
if err := scan.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bb, nil
|
||||
}
|
||||
61
vendor/golang.org/x/tools/blog/atom/atom.go
generated
vendored
Normal file
61
vendor/golang.org/x/tools/blog/atom/atom.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Adapted from encoding/xml/read_test.go.
|
||||
|
||||
// Package atom defines XML data structures for an Atom feed.
|
||||
package atom // import "golang.org/x/tools/blog/atom"
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Feed struct {
|
||||
XMLName xml.Name `xml:"http://www.w3.org/2005/Atom feed"`
|
||||
Title string `xml:"title"`
|
||||
ID string `xml:"id"`
|
||||
Link []Link `xml:"link"`
|
||||
Updated TimeStr `xml:"updated"`
|
||||
Author *Person `xml:"author"`
|
||||
Entry []*Entry `xml:"entry"`
|
||||
}
|
||||
|
||||
type Entry struct {
|
||||
Title string `xml:"title"`
|
||||
ID string `xml:"id"`
|
||||
Link []Link `xml:"link"`
|
||||
Published TimeStr `xml:"published"`
|
||||
Updated TimeStr `xml:"updated"`
|
||||
Author *Person `xml:"author"`
|
||||
Summary *Text `xml:"summary"`
|
||||
Content *Text `xml:"content"`
|
||||
}
|
||||
|
||||
type Link struct {
|
||||
Rel string `xml:"rel,attr,omitempty"`
|
||||
Href string `xml:"href,attr"`
|
||||
Type string `xml:"type,attr,omitempty"`
|
||||
HrefLang string `xml:"hreflang,attr,omitempty"`
|
||||
Title string `xml:"title,attr,omitempty"`
|
||||
Length uint `xml:"length,attr,omitempty"`
|
||||
}
|
||||
|
||||
type Person struct {
|
||||
Name string `xml:"name"`
|
||||
URI string `xml:"uri,omitempty"`
|
||||
Email string `xml:"email,omitempty"`
|
||||
InnerXML string `xml:",innerxml"`
|
||||
}
|
||||
|
||||
type Text struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Body string `xml:",chardata"`
|
||||
}
|
||||
|
||||
type TimeStr string
|
||||
|
||||
func Time(t time.Time) TimeStr {
|
||||
return TimeStr(t.Format("2006-01-02T15:04:05-07:00"))
|
||||
}
|
||||
20
vendor/golang.org/x/tools/container/intsets/popcnt_amd64.go
generated
vendored
Normal file
20
vendor/golang.org/x/tools/container/intsets/popcnt_amd64.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64,!appengine,!gccgo
|
||||
|
||||
package intsets
|
||||
|
||||
func popcnt(x word) int
|
||||
func havePOPCNT() bool
|
||||
|
||||
var hasPOPCNT = havePOPCNT()
|
||||
|
||||
// popcount returns the population count (number of set bits) of x.
|
||||
func popcount(x word) int {
|
||||
if hasPOPCNT {
|
||||
return popcnt(x)
|
||||
}
|
||||
return popcountTable(x) // faster than Hacker's Delight
|
||||
}
|
||||
30
vendor/golang.org/x/tools/container/intsets/popcnt_amd64.s
generated
vendored
Normal file
30
vendor/golang.org/x/tools/container/intsets/popcnt_amd64.s
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64,!appengine,!gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// func havePOPCNT() bool
|
||||
TEXT ·havePOPCNT(SB),4,$0
|
||||
MOVQ $1, AX
|
||||
CPUID
|
||||
SHRQ $23, CX
|
||||
ANDQ $1, CX
|
||||
MOVB CX, ret+0(FP)
|
||||
RET
|
||||
|
||||
// func popcnt(word) int
|
||||
TEXT ·popcnt(SB),NOSPLIT,$0-8
|
||||
XORQ AX, AX
|
||||
MOVQ x+0(FP), SI
|
||||
// POPCNT (SI), AX is not recognized by Go assembler,
|
||||
// so we assemble it ourselves.
|
||||
BYTE $0xf3
|
||||
BYTE $0x48
|
||||
BYTE $0x0f
|
||||
BYTE $0xb8
|
||||
BYTE $0xc6
|
||||
MOVQ AX, ret+8(FP)
|
||||
RET
|
||||
9
vendor/golang.org/x/tools/container/intsets/popcnt_gccgo.go
generated
vendored
Normal file
9
vendor/golang.org/x/tools/container/intsets/popcnt_gccgo.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build gccgo
|
||||
|
||||
package intsets
|
||||
|
||||
func popcount(x word) int
|
||||
19
vendor/golang.org/x/tools/container/intsets/popcnt_gccgo_c.c
generated
vendored
Normal file
19
vendor/golang.org/x/tools/container/intsets/popcnt_gccgo_c.c
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build gccgo
|
||||
|
||||
#include <errno.h>
|
||||
#include <stdint.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#define _STRINGIFY2_(x) #x
|
||||
#define _STRINGIFY_(x) _STRINGIFY2_(x)
|
||||
#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
|
||||
|
||||
extern intptr_t popcount(uintptr_t x) __asm__(GOSYM_PREFIX GOPKGPATH ".popcount");
|
||||
|
||||
intptr_t popcount(uintptr_t x) {
|
||||
return __builtin_popcountl((unsigned long)(x));
|
||||
}
|
||||
33
vendor/golang.org/x/tools/container/intsets/popcnt_generic.go
generated
vendored
Normal file
33
vendor/golang.org/x/tools/container/intsets/popcnt_generic.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !amd64 appengine
|
||||
// +build !gccgo
|
||||
|
||||
package intsets
|
||||
|
||||
import "runtime"
|
||||
|
||||
// We compared three algorithms---Hacker's Delight, table lookup,
|
||||
// and AMD64's SSE4.1 hardware POPCNT---on a 2.67GHz Xeon X5550.
|
||||
//
|
||||
// % GOARCH=amd64 go test -run=NONE -bench=Popcount
|
||||
// POPCNT 5.12 ns/op
|
||||
// Table 8.53 ns/op
|
||||
// HackersDelight 9.96 ns/op
|
||||
//
|
||||
// % GOARCH=386 go test -run=NONE -bench=Popcount
|
||||
// Table 10.4 ns/op
|
||||
// HackersDelight 5.23 ns/op
|
||||
//
|
||||
// (AMD64's ABM1 hardware supports ntz and nlz too,
|
||||
// but they aren't critical.)
|
||||
|
||||
// popcount returns the population count (number of set bits) of x.
|
||||
func popcount(x word) int {
|
||||
if runtime.GOARCH == "386" {
|
||||
return popcountHD(uint32(x))
|
||||
}
|
||||
return popcountTable(x)
|
||||
}
|
||||
1091
vendor/golang.org/x/tools/container/intsets/sparse.go
generated
vendored
Normal file
1091
vendor/golang.org/x/tools/container/intsets/sparse.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
84
vendor/golang.org/x/tools/container/intsets/util.go
generated
vendored
Normal file
84
vendor/golang.org/x/tools/container/intsets/util.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package intsets
|
||||
|
||||
// From Hacker's Delight, fig 5.2.
|
||||
func popcountHD(x uint32) int {
|
||||
x -= (x >> 1) & 0x55555555
|
||||
x = (x & 0x33333333) + ((x >> 2) & 0x33333333)
|
||||
x = (x + (x >> 4)) & 0x0f0f0f0f
|
||||
x = x + (x >> 8)
|
||||
x = x + (x >> 16)
|
||||
return int(x & 0x0000003f)
|
||||
}
|
||||
|
||||
var a [1 << 8]byte
|
||||
|
||||
func init() {
|
||||
for i := range a {
|
||||
var n byte
|
||||
for x := i; x != 0; x >>= 1 {
|
||||
if x&1 != 0 {
|
||||
n++
|
||||
}
|
||||
}
|
||||
a[i] = n
|
||||
}
|
||||
}
|
||||
|
||||
func popcountTable(x word) int {
|
||||
return int(a[byte(x>>(0*8))] +
|
||||
a[byte(x>>(1*8))] +
|
||||
a[byte(x>>(2*8))] +
|
||||
a[byte(x>>(3*8))] +
|
||||
a[byte(x>>(4*8))] +
|
||||
a[byte(x>>(5*8))] +
|
||||
a[byte(x>>(6*8))] +
|
||||
a[byte(x>>(7*8))])
|
||||
}
|
||||
|
||||
// nlz returns the number of leading zeros of x.
|
||||
// From Hacker's Delight, fig 5.11.
|
||||
func nlz(x word) int {
|
||||
x |= (x >> 1)
|
||||
x |= (x >> 2)
|
||||
x |= (x >> 4)
|
||||
x |= (x >> 8)
|
||||
x |= (x >> 16)
|
||||
x |= (x >> 32)
|
||||
return popcount(^x)
|
||||
}
|
||||
|
||||
// ntz returns the number of trailing zeros of x.
|
||||
// From Hacker's Delight, fig 5.13.
|
||||
func ntz(x word) int {
|
||||
if x == 0 {
|
||||
return bitsPerWord
|
||||
}
|
||||
n := 1
|
||||
if bitsPerWord == 64 {
|
||||
if (x & 0xffffffff) == 0 {
|
||||
n = n + 32
|
||||
x = x >> 32
|
||||
}
|
||||
}
|
||||
if (x & 0x0000ffff) == 0 {
|
||||
n = n + 16
|
||||
x = x >> 16
|
||||
}
|
||||
if (x & 0x000000ff) == 0 {
|
||||
n = n + 8
|
||||
x = x >> 8
|
||||
}
|
||||
if (x & 0x0000000f) == 0 {
|
||||
n = n + 4
|
||||
x = x >> 4
|
||||
}
|
||||
if (x & 0x00000003) == 0 {
|
||||
n = n + 2
|
||||
x = x >> 2
|
||||
}
|
||||
return n - int(x&1)
|
||||
}
|
||||
627
vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
generated
vendored
Normal file
627
vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
generated
vendored
Normal file
@@ -0,0 +1,627 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package astutil
|
||||
|
||||
// This file defines utilities for working with source positions.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// PathEnclosingInterval returns the node that encloses the source
|
||||
// interval [start, end), and all its ancestors up to the AST root.
|
||||
//
|
||||
// The definition of "enclosing" used by this function considers
|
||||
// additional whitespace abutting a node to be enclosed by it.
|
||||
// In this example:
|
||||
//
|
||||
// z := x + y // add them
|
||||
// <-A->
|
||||
// <----B----->
|
||||
//
|
||||
// the ast.BinaryExpr(+) node is considered to enclose interval B
|
||||
// even though its [Pos()..End()) is actually only interval A.
|
||||
// This behaviour makes user interfaces more tolerant of imperfect
|
||||
// input.
|
||||
//
|
||||
// This function treats tokens as nodes, though they are not included
|
||||
// in the result. e.g. PathEnclosingInterval("+") returns the
|
||||
// enclosing ast.BinaryExpr("x + y").
|
||||
//
|
||||
// If start==end, the 1-char interval following start is used instead.
|
||||
//
|
||||
// The 'exact' result is true if the interval contains only path[0]
|
||||
// and perhaps some adjacent whitespace. It is false if the interval
|
||||
// overlaps multiple children of path[0], or if it contains only
|
||||
// interior whitespace of path[0].
|
||||
// In this example:
|
||||
//
|
||||
// z := x + y // add them
|
||||
// <--C--> <---E-->
|
||||
// ^
|
||||
// D
|
||||
//
|
||||
// intervals C, D and E are inexact. C is contained by the
|
||||
// z-assignment statement, because it spans three of its children (:=,
|
||||
// x, +). So too is the 1-char interval D, because it contains only
|
||||
// interior whitespace of the assignment. E is considered interior
|
||||
// whitespace of the BlockStmt containing the assignment.
|
||||
//
|
||||
// Precondition: [start, end) both lie within the same file as root.
|
||||
// TODO(adonovan): return (nil, false) in this case and remove precond.
|
||||
// Requires FileSet; see loader.tokenFileContainsPos.
|
||||
//
|
||||
// Postcondition: path is never nil; it always contains at least 'root'.
|
||||
//
|
||||
func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
|
||||
// fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging
|
||||
|
||||
// Precondition: node.[Pos..End) and adjoining whitespace contain [start, end).
|
||||
var visit func(node ast.Node) bool
|
||||
visit = func(node ast.Node) bool {
|
||||
path = append(path, node)
|
||||
|
||||
nodePos := node.Pos()
|
||||
nodeEnd := node.End()
|
||||
|
||||
// fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging
|
||||
|
||||
// Intersect [start, end) with interval of node.
|
||||
if start < nodePos {
|
||||
start = nodePos
|
||||
}
|
||||
if end > nodeEnd {
|
||||
end = nodeEnd
|
||||
}
|
||||
|
||||
// Find sole child that contains [start, end).
|
||||
children := childrenOf(node)
|
||||
l := len(children)
|
||||
for i, child := range children {
|
||||
// [childPos, childEnd) is unaugmented interval of child.
|
||||
childPos := child.Pos()
|
||||
childEnd := child.End()
|
||||
|
||||
// [augPos, augEnd) is whitespace-augmented interval of child.
|
||||
augPos := childPos
|
||||
augEnd := childEnd
|
||||
if i > 0 {
|
||||
augPos = children[i-1].End() // start of preceding whitespace
|
||||
}
|
||||
if i < l-1 {
|
||||
nextChildPos := children[i+1].Pos()
|
||||
// Does [start, end) lie between child and next child?
|
||||
if start >= augEnd && end <= nextChildPos {
|
||||
return false // inexact match
|
||||
}
|
||||
augEnd = nextChildPos // end of following whitespace
|
||||
}
|
||||
|
||||
// fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
|
||||
// i, augPos, augEnd, start, end) // debugging
|
||||
|
||||
// Does augmented child strictly contain [start, end)?
|
||||
if augPos <= start && end <= augEnd {
|
||||
_, isToken := child.(tokenNode)
|
||||
return isToken || visit(child)
|
||||
}
|
||||
|
||||
// Does [start, end) overlap multiple children?
|
||||
// i.e. left-augmented child contains start
|
||||
// but LR-augmented child does not contain end.
|
||||
if start < childEnd && end > augEnd {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// No single child contained [start, end),
|
||||
// so node is the result. Is it exact?
|
||||
|
||||
// (It's tempting to put this condition before the
|
||||
// child loop, but it gives the wrong result in the
|
||||
// case where a node (e.g. ExprStmt) and its sole
|
||||
// child have equal intervals.)
|
||||
if start == nodePos && end == nodeEnd {
|
||||
return true // exact match
|
||||
}
|
||||
|
||||
return false // inexact: overlaps multiple children
|
||||
}
|
||||
|
||||
if start > end {
|
||||
start, end = end, start
|
||||
}
|
||||
|
||||
if start < root.End() && end > root.Pos() {
|
||||
if start == end {
|
||||
end = start + 1 // empty interval => interval of size 1
|
||||
}
|
||||
exact = visit(root)
|
||||
|
||||
// Reverse the path:
|
||||
for i, l := 0, len(path); i < l/2; i++ {
|
||||
path[i], path[l-1-i] = path[l-1-i], path[i]
|
||||
}
|
||||
} else {
|
||||
// Selection lies within whitespace preceding the
|
||||
// first (or following the last) declaration in the file.
|
||||
// The result nonetheless always includes the ast.File.
|
||||
path = append(path, root)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// tokenNode is a dummy implementation of ast.Node for a single token.
|
||||
// They are used transiently by PathEnclosingInterval but never escape
|
||||
// this package.
|
||||
//
|
||||
type tokenNode struct {
|
||||
pos token.Pos
|
||||
end token.Pos
|
||||
}
|
||||
|
||||
func (n tokenNode) Pos() token.Pos {
|
||||
return n.pos
|
||||
}
|
||||
|
||||
func (n tokenNode) End() token.Pos {
|
||||
return n.end
|
||||
}
|
||||
|
||||
func tok(pos token.Pos, len int) ast.Node {
|
||||
return tokenNode{pos, pos + token.Pos(len)}
|
||||
}
|
||||
|
||||
// childrenOf returns the direct non-nil children of ast.Node n.
|
||||
// It may include fake ast.Node implementations for bare tokens.
|
||||
// it is not safe to call (e.g.) ast.Walk on such nodes.
|
||||
//
|
||||
func childrenOf(n ast.Node) []ast.Node {
|
||||
var children []ast.Node
|
||||
|
||||
// First add nodes for all true subtrees.
|
||||
ast.Inspect(n, func(node ast.Node) bool {
|
||||
if node == n { // push n
|
||||
return true // recur
|
||||
}
|
||||
if node != nil { // push child
|
||||
children = append(children, node)
|
||||
}
|
||||
return false // no recursion
|
||||
})
|
||||
|
||||
// Then add fake Nodes for bare tokens.
|
||||
switch n := n.(type) {
|
||||
case *ast.ArrayType:
|
||||
children = append(children,
|
||||
tok(n.Lbrack, len("[")),
|
||||
tok(n.Elt.End(), len("]")))
|
||||
|
||||
case *ast.AssignStmt:
|
||||
children = append(children,
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
|
||||
case *ast.BasicLit:
|
||||
children = append(children,
|
||||
tok(n.ValuePos, len(n.Value)))
|
||||
|
||||
case *ast.BinaryExpr:
|
||||
children = append(children, tok(n.OpPos, len(n.Op.String())))
|
||||
|
||||
case *ast.BlockStmt:
|
||||
children = append(children,
|
||||
tok(n.Lbrace, len("{")),
|
||||
tok(n.Rbrace, len("}")))
|
||||
|
||||
case *ast.BranchStmt:
|
||||
children = append(children,
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
|
||||
case *ast.CallExpr:
|
||||
children = append(children,
|
||||
tok(n.Lparen, len("(")),
|
||||
tok(n.Rparen, len(")")))
|
||||
if n.Ellipsis != 0 {
|
||||
children = append(children, tok(n.Ellipsis, len("...")))
|
||||
}
|
||||
|
||||
case *ast.CaseClause:
|
||||
if n.List == nil {
|
||||
children = append(children,
|
||||
tok(n.Case, len("default")))
|
||||
} else {
|
||||
children = append(children,
|
||||
tok(n.Case, len("case")))
|
||||
}
|
||||
children = append(children, tok(n.Colon, len(":")))
|
||||
|
||||
case *ast.ChanType:
|
||||
switch n.Dir {
|
||||
case ast.RECV:
|
||||
children = append(children, tok(n.Begin, len("<-chan")))
|
||||
case ast.SEND:
|
||||
children = append(children, tok(n.Begin, len("chan<-")))
|
||||
case ast.RECV | ast.SEND:
|
||||
children = append(children, tok(n.Begin, len("chan")))
|
||||
}
|
||||
|
||||
case *ast.CommClause:
|
||||
if n.Comm == nil {
|
||||
children = append(children,
|
||||
tok(n.Case, len("default")))
|
||||
} else {
|
||||
children = append(children,
|
||||
tok(n.Case, len("case")))
|
||||
}
|
||||
children = append(children, tok(n.Colon, len(":")))
|
||||
|
||||
case *ast.Comment:
|
||||
// nop
|
||||
|
||||
case *ast.CommentGroup:
|
||||
// nop
|
||||
|
||||
case *ast.CompositeLit:
|
||||
children = append(children,
|
||||
tok(n.Lbrace, len("{")),
|
||||
tok(n.Rbrace, len("{")))
|
||||
|
||||
case *ast.DeclStmt:
|
||||
// nop
|
||||
|
||||
case *ast.DeferStmt:
|
||||
children = append(children,
|
||||
tok(n.Defer, len("defer")))
|
||||
|
||||
case *ast.Ellipsis:
|
||||
children = append(children,
|
||||
tok(n.Ellipsis, len("...")))
|
||||
|
||||
case *ast.EmptyStmt:
|
||||
// nop
|
||||
|
||||
case *ast.ExprStmt:
|
||||
// nop
|
||||
|
||||
case *ast.Field:
|
||||
// TODO(adonovan): Field.{Doc,Comment,Tag}?
|
||||
|
||||
case *ast.FieldList:
|
||||
children = append(children,
|
||||
tok(n.Opening, len("(")),
|
||||
tok(n.Closing, len(")")))
|
||||
|
||||
case *ast.File:
|
||||
// TODO test: Doc
|
||||
children = append(children,
|
||||
tok(n.Package, len("package")))
|
||||
|
||||
case *ast.ForStmt:
|
||||
children = append(children,
|
||||
tok(n.For, len("for")))
|
||||
|
||||
case *ast.FuncDecl:
|
||||
// TODO(adonovan): FuncDecl.Comment?
|
||||
|
||||
// Uniquely, FuncDecl breaks the invariant that
|
||||
// preorder traversal yields tokens in lexical order:
|
||||
// in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
|
||||
//
|
||||
// As a workaround, we inline the case for FuncType
|
||||
// here and order things correctly.
|
||||
//
|
||||
children = nil // discard ast.Walk(FuncDecl) info subtrees
|
||||
children = append(children, tok(n.Type.Func, len("func")))
|
||||
if n.Recv != nil {
|
||||
children = append(children, n.Recv)
|
||||
}
|
||||
children = append(children, n.Name)
|
||||
if n.Type.Params != nil {
|
||||
children = append(children, n.Type.Params)
|
||||
}
|
||||
if n.Type.Results != nil {
|
||||
children = append(children, n.Type.Results)
|
||||
}
|
||||
if n.Body != nil {
|
||||
children = append(children, n.Body)
|
||||
}
|
||||
|
||||
case *ast.FuncLit:
|
||||
// nop
|
||||
|
||||
case *ast.FuncType:
|
||||
if n.Func != 0 {
|
||||
children = append(children,
|
||||
tok(n.Func, len("func")))
|
||||
}
|
||||
|
||||
case *ast.GenDecl:
|
||||
children = append(children,
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
if n.Lparen != 0 {
|
||||
children = append(children,
|
||||
tok(n.Lparen, len("(")),
|
||||
tok(n.Rparen, len(")")))
|
||||
}
|
||||
|
||||
case *ast.GoStmt:
|
||||
children = append(children,
|
||||
tok(n.Go, len("go")))
|
||||
|
||||
case *ast.Ident:
|
||||
children = append(children,
|
||||
tok(n.NamePos, len(n.Name)))
|
||||
|
||||
case *ast.IfStmt:
|
||||
children = append(children,
|
||||
tok(n.If, len("if")))
|
||||
|
||||
case *ast.ImportSpec:
|
||||
// TODO(adonovan): ImportSpec.{Doc,EndPos}?
|
||||
|
||||
case *ast.IncDecStmt:
|
||||
children = append(children,
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
|
||||
case *ast.IndexExpr:
|
||||
children = append(children,
|
||||
tok(n.Lbrack, len("{")),
|
||||
tok(n.Rbrack, len("}")))
|
||||
|
||||
case *ast.InterfaceType:
|
||||
children = append(children,
|
||||
tok(n.Interface, len("interface")))
|
||||
|
||||
case *ast.KeyValueExpr:
|
||||
children = append(children,
|
||||
tok(n.Colon, len(":")))
|
||||
|
||||
case *ast.LabeledStmt:
|
||||
children = append(children,
|
||||
tok(n.Colon, len(":")))
|
||||
|
||||
case *ast.MapType:
|
||||
children = append(children,
|
||||
tok(n.Map, len("map")))
|
||||
|
||||
case *ast.ParenExpr:
|
||||
children = append(children,
|
||||
tok(n.Lparen, len("(")),
|
||||
tok(n.Rparen, len(")")))
|
||||
|
||||
case *ast.RangeStmt:
|
||||
children = append(children,
|
||||
tok(n.For, len("for")),
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
|
||||
case *ast.ReturnStmt:
|
||||
children = append(children,
|
||||
tok(n.Return, len("return")))
|
||||
|
||||
case *ast.SelectStmt:
|
||||
children = append(children,
|
||||
tok(n.Select, len("select")))
|
||||
|
||||
case *ast.SelectorExpr:
|
||||
// nop
|
||||
|
||||
case *ast.SendStmt:
|
||||
children = append(children,
|
||||
tok(n.Arrow, len("<-")))
|
||||
|
||||
case *ast.SliceExpr:
|
||||
children = append(children,
|
||||
tok(n.Lbrack, len("[")),
|
||||
tok(n.Rbrack, len("]")))
|
||||
|
||||
case *ast.StarExpr:
|
||||
children = append(children, tok(n.Star, len("*")))
|
||||
|
||||
case *ast.StructType:
|
||||
children = append(children, tok(n.Struct, len("struct")))
|
||||
|
||||
case *ast.SwitchStmt:
|
||||
children = append(children, tok(n.Switch, len("switch")))
|
||||
|
||||
case *ast.TypeAssertExpr:
|
||||
children = append(children,
|
||||
tok(n.Lparen-1, len(".")),
|
||||
tok(n.Lparen, len("(")),
|
||||
tok(n.Rparen, len(")")))
|
||||
|
||||
case *ast.TypeSpec:
|
||||
// TODO(adonovan): TypeSpec.{Doc,Comment}?
|
||||
|
||||
case *ast.TypeSwitchStmt:
|
||||
children = append(children, tok(n.Switch, len("switch")))
|
||||
|
||||
case *ast.UnaryExpr:
|
||||
children = append(children, tok(n.OpPos, len(n.Op.String())))
|
||||
|
||||
case *ast.ValueSpec:
|
||||
// TODO(adonovan): ValueSpec.{Doc,Comment}?
|
||||
|
||||
case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
|
||||
// nop
|
||||
}
|
||||
|
||||
// TODO(adonovan): opt: merge the logic of ast.Inspect() into
|
||||
// the switch above so we can make interleaved callbacks for
|
||||
// both Nodes and Tokens in the right order and avoid the need
|
||||
// to sort.
|
||||
sort.Sort(byPos(children))
|
||||
|
||||
return children
|
||||
}
|
||||
|
||||
type byPos []ast.Node
|
||||
|
||||
func (sl byPos) Len() int {
|
||||
return len(sl)
|
||||
}
|
||||
func (sl byPos) Less(i, j int) bool {
|
||||
return sl[i].Pos() < sl[j].Pos()
|
||||
}
|
||||
func (sl byPos) Swap(i, j int) {
|
||||
sl[i], sl[j] = sl[j], sl[i]
|
||||
}
|
||||
|
||||
// NodeDescription returns a description of the concrete type of n suitable
// for a user interface.
//
// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident,
// StarExpr) we could be much more specific given the path to the AST
// root.  Perhaps we should do that.
//
func NodeDescription(n ast.Node) string {
	switch n := n.(type) {
	case *ast.ArrayType:
		return "array type"
	case *ast.AssignStmt:
		return "assignment"
	case *ast.BadDecl:
		return "bad declaration"
	case *ast.BadExpr:
		return "bad expression"
	case *ast.BadStmt:
		return "bad statement"
	case *ast.BasicLit:
		return "basic literal"
	case *ast.BinaryExpr:
		return fmt.Sprintf("binary %s operation", n.Op)
	case *ast.BlockStmt:
		return "block"
	case *ast.BranchStmt:
		switch n.Tok {
		case token.BREAK:
			return "break statement"
		case token.CONTINUE:
			return "continue statement"
		case token.GOTO:
			return "goto statement"
		case token.FALLTHROUGH:
			return "fall-through statement"
		}
		// Any other token falls through to the panic below.
	case *ast.CallExpr:
		// A single-argument call without "..." may actually be a type
		// conversion, which cannot be distinguished syntactically.
		if len(n.Args) == 1 && !n.Ellipsis.IsValid() {
			return "function call (or conversion)"
		}
		return "function call"
	case *ast.CaseClause:
		return "case clause"
	case *ast.ChanType:
		return "channel type"
	case *ast.CommClause:
		return "communication clause"
	case *ast.Comment:
		return "comment"
	case *ast.CommentGroup:
		return "comment group"
	case *ast.CompositeLit:
		return "composite literal"
	case *ast.DeclStmt:
		return NodeDescription(n.Decl) + " statement"
	case *ast.DeferStmt:
		return "defer statement"
	case *ast.Ellipsis:
		return "ellipsis"
	case *ast.EmptyStmt:
		return "empty statement"
	case *ast.ExprStmt:
		return "expression statement"
	case *ast.Field:
		// Can be any of these:
		// struct {x, y int}  -- struct field(s)
		// struct {T}         -- anon struct field
		// interface {I}      -- interface embedding
		// interface {f()}    -- interface method
		// func (A) func(B) C -- receiver, param(s), result(s)
		return "field/method/parameter"
	case *ast.FieldList:
		return "field/method/parameter list"
	case *ast.File:
		return "source file"
	case *ast.ForStmt:
		return "for loop"
	case *ast.FuncDecl:
		return "function declaration"
	case *ast.FuncLit:
		return "function literal"
	case *ast.FuncType:
		return "function type"
	case *ast.GenDecl:
		switch n.Tok {
		case token.IMPORT:
			return "import declaration"
		case token.CONST:
			return "constant declaration"
		case token.TYPE:
			return "type declaration"
		case token.VAR:
			return "variable declaration"
		}
		// Any other token falls through to the panic below.
	case *ast.GoStmt:
		return "go statement"
	case *ast.Ident:
		return "identifier"
	case *ast.IfStmt:
		return "if statement"
	case *ast.ImportSpec:
		return "import specification"
	case *ast.IncDecStmt:
		if n.Tok == token.INC {
			return "increment statement"
		}
		return "decrement statement"
	case *ast.IndexExpr:
		return "index expression"
	case *ast.InterfaceType:
		return "interface type"
	case *ast.KeyValueExpr:
		return "key/value association"
	case *ast.LabeledStmt:
		return "statement label"
	case *ast.MapType:
		return "map type"
	case *ast.Package:
		return "package"
	case *ast.ParenExpr:
		return "parenthesized " + NodeDescription(n.X)
	case *ast.RangeStmt:
		return "range loop"
	case *ast.ReturnStmt:
		return "return statement"
	case *ast.SelectStmt:
		return "select statement"
	case *ast.SelectorExpr:
		return "selector"
	case *ast.SendStmt:
		return "channel send"
	case *ast.SliceExpr:
		return "slice expression"
	case *ast.StarExpr:
		return "*-operation" // load/store expr or pointer type
	case *ast.StructType:
		return "struct type"
	case *ast.SwitchStmt:
		return "switch statement"
	case *ast.TypeAssertExpr:
		return "type assertion"
	case *ast.TypeSpec:
		return "type specification"
	case *ast.TypeSwitchStmt:
		return "type switch"
	case *ast.UnaryExpr:
		return fmt.Sprintf("unary %s operation", n.Op)
	case *ast.ValueSpec:
		return "value specification"

	}
	panic(fmt.Sprintf("unexpected node type: %T", n))
}
|
||||
481
vendor/golang.org/x/tools/go/ast/astutil/imports.go
generated
vendored
Normal file
481
vendor/golang.org/x/tools/go/ast/astutil/imports.go
generated
vendored
Normal file
@@ -0,0 +1,481 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package astutil contains common utilities for working with the Go AST.
|
||||
package astutil // import "golang.org/x/tools/go/ast/astutil"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// AddImport adds the import path to the file f, if absent.
|
||||
func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) {
|
||||
return AddNamedImport(fset, f, "", path)
|
||||
}
|
||||
|
||||
// AddNamedImport adds the import with the given name and path to the file f, if absent.
// If name is not empty, it is used to rename the import.
//
// For example, calling
//	AddNamedImport(fset, f, "pathpkg", "path")
// adds
//	import pathpkg "path"
//
// It reports whether the file was changed.
func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) {
	if imports(f, name, path) {
		// Already present with the same name; nothing to do.
		return false
	}

	newImport := &ast.ImportSpec{
		Path: &ast.BasicLit{
			Kind:  token.STRING,
			Value: strconv.Quote(path),
		},
	}
	if name != "" {
		newImport.Name = &ast.Ident{Name: name}
	}

	// Find an import decl to add to.
	// The goal is to find an existing import
	// whose import path has the longest shared
	// prefix with path.
	var (
		bestMatch  = -1          // length of longest shared prefix
		lastImport = -1          // index in f.Decls of the file's final import decl
		impDecl    *ast.GenDecl  // import decl containing the best match
		impIndex   = -1          // spec index in impDecl containing the best match

		isThirdPartyPath = isThirdParty(path)
	)
	for i, decl := range f.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if ok && gen.Tok == token.IMPORT {
			lastImport = i
			// Do not add to import "C", to avoid disrupting the
			// association with its doc comment, breaking cgo.
			if declImports(gen, "C") {
				continue
			}

			// Match an empty import decl if that's all that is available.
			if len(gen.Specs) == 0 && bestMatch == -1 {
				impDecl = gen
			}

			// Compute longest shared prefix with imports in this group and find best
			// matched import spec.
			// 1. Always prefer import spec with longest shared prefix.
			// 2. While match length is 0,
			// - for stdlib package: prefer first import spec.
			// - for third party package: prefer first third party import spec.
			// We cannot use last import spec as best match for third party package
			// because grouped imports are usually placed last by goimports -local
			// flag.
			// See issue #19190.
			seenAnyThirdParty := false
			for j, spec := range gen.Specs {
				impspec := spec.(*ast.ImportSpec)
				p := importPath(impspec)
				n := matchLen(p, path)
				if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) {
					bestMatch = n
					impDecl = gen
					impIndex = j
				}
				seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p)
			}
		}
	}

	// If no import decl found, add one after the last import.
	if impDecl == nil {
		impDecl = &ast.GenDecl{
			Tok: token.IMPORT,
		}
		if lastImport >= 0 {
			impDecl.TokPos = f.Decls[lastImport].End()
		} else {
			// There are no existing imports.
			// Our new import, preceded by a blank line, goes after the package declaration
			// and after the comment, if any, that starts on the same line as the
			// package declaration.
			impDecl.TokPos = f.Package

			file := fset.File(f.Package)
			pkgLine := file.Line(f.Package)
			for _, c := range f.Comments {
				if file.Line(c.Pos()) > pkgLine {
					break
				}
				// +2 for a blank line
				impDecl.TokPos = c.End() + 2
			}
		}
		// Insert impDecl into f.Decls immediately after the last import decl
		// (or at the front when lastImport == -1).
		f.Decls = append(f.Decls, nil)
		copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
		f.Decls[lastImport+1] = impDecl
	}

	// Insert new import at insertAt.
	insertAt := 0
	if impIndex >= 0 {
		// insert after the found import
		insertAt = impIndex + 1
	}
	impDecl.Specs = append(impDecl.Specs, nil)
	copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
	impDecl.Specs[insertAt] = newImport
	pos := impDecl.Pos()
	if insertAt > 0 {
		// If there is a comment after an existing import, preserve the comment
		// position by adding the new import after the comment.
		if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil {
			pos = spec.Comment.End()
		} else {
			// Assign same position as the previous import,
			// so that the sorter sees it as being in the same block.
			pos = impDecl.Specs[insertAt-1].Pos()
		}
	}
	if newImport.Name != nil {
		newImport.Name.NamePos = pos
	}
	newImport.Path.ValuePos = pos
	newImport.EndPos = pos

	// Clean up parens. impDecl contains at least one spec.
	if len(impDecl.Specs) == 1 {
		// Remove unneeded parens.
		impDecl.Lparen = token.NoPos
	} else if !impDecl.Lparen.IsValid() {
		// impDecl needs parens added.
		impDecl.Lparen = impDecl.Specs[0].Pos()
	}

	f.Imports = append(f.Imports, newImport)

	if len(f.Decls) <= 1 {
		return true
	}

	// Merge all the import declarations into the first one.
	var first *ast.GenDecl
	for i := 0; i < len(f.Decls); i++ {
		decl := f.Decls[i]
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
			continue
		}
		if first == nil {
			first = gen
			continue // Don't touch the first one.
		}
		// We now know there is more than one package in this import
		// declaration. Ensure that it ends up parenthesized.
		first.Lparen = first.Pos()
		// Move the imports of the other import declaration to the first one.
		for _, spec := range gen.Specs {
			spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
			first.Specs = append(first.Specs, spec)
		}
		f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
		i--
	}

	return true
}
|
||||
|
||||
// isThirdParty reports whether importPath looks like a third party
// (non-standard-library) package path. Such paths usually contain a
// dot in a domain-like first element (".com", ".org", ...).
// This logic is taken from golang.org/x/tools/imports package.
func isThirdParty(importPath string) bool {
	return strings.ContainsRune(importPath, '.')
}
|
||||
|
||||
// DeleteImport deletes the import path from the file f, if present.
|
||||
// If there are duplicate import declarations, all matching ones are deleted.
|
||||
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
|
||||
return DeleteNamedImport(fset, f, "", path)
|
||||
}
|
||||
|
||||
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
// If there are duplicate import declarations, all matching ones are deleted.
// It reports whether the file was changed.
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
	var delspecs []*ast.ImportSpec
	var delcomments []*ast.CommentGroup

	// Find the import nodes that import path, if any.
	for i := 0; i < len(f.Decls); i++ {
		decl := f.Decls[i]
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for j := 0; j < len(gen.Specs); j++ {
			spec := gen.Specs[j]
			impspec := spec.(*ast.ImportSpec)
			if importName(impspec) != name || importPath(impspec) != path {
				continue
			}

			// We found an import spec that imports path.
			// Delete it.
			delspecs = append(delspecs, impspec)
			deleted = true
			copy(gen.Specs[j:], gen.Specs[j+1:])
			gen.Specs = gen.Specs[:len(gen.Specs)-1]

			// If this was the last import spec in this decl,
			// delete the decl, too.
			if len(gen.Specs) == 0 {
				copy(f.Decls[i:], f.Decls[i+1:])
				f.Decls = f.Decls[:len(f.Decls)-1]
				i--
				break
			} else if len(gen.Specs) == 1 {
				// Only one import remains: its doc/line comments will be
				// reattached when the decl loses its parens, so collect the
				// deleted spec's comments for removal from f.Comments.
				if impspec.Doc != nil {
					delcomments = append(delcomments, impspec.Doc)
				}
				if impspec.Comment != nil {
					delcomments = append(delcomments, impspec.Comment)
				}
				for _, cg := range f.Comments {
					// Found comment on the same line as the import spec.
					if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line {
						delcomments = append(delcomments, cg)
						break
					}
				}

				spec := gen.Specs[0].(*ast.ImportSpec)

				// Move the documentation right after the import decl.
				if spec.Doc != nil {
					for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line {
						fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
					}
				}
				for _, cg := range f.Comments {
					if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line {
						for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line {
							fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
						}
						break
					}
				}
			}
			if j > 0 {
				lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
				lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
				line := fset.Position(impspec.Path.ValuePos).Line

				// We deleted an entry but now there may be
				// a blank line-sized hole where the import was.
				if line-lastLine > 1 {
					// There was a blank line immediately preceding the deleted import,
					// so there's no need to close the hole.
					// Do nothing.
				} else if line != fset.File(gen.Rparen).LineCount() {
					// There was no blank line. Close the hole.
					fset.File(gen.Rparen).MergeLine(line)
				}
			}
			// Re-examine index j, which now holds the next spec.
			j--
		}
	}

	// Delete imports from f.Imports.
	for i := 0; i < len(f.Imports); i++ {
		imp := f.Imports[i]
		for j, del := range delspecs {
			if imp == del {
				copy(f.Imports[i:], f.Imports[i+1:])
				f.Imports = f.Imports[:len(f.Imports)-1]
				copy(delspecs[j:], delspecs[j+1:])
				delspecs = delspecs[:len(delspecs)-1]
				i--
				break
			}
		}
	}

	// Delete comments from f.Comments.
	for i := 0; i < len(f.Comments); i++ {
		cg := f.Comments[i]
		for j, del := range delcomments {
			if cg == del {
				copy(f.Comments[i:], f.Comments[i+1:])
				f.Comments = f.Comments[:len(f.Comments)-1]
				copy(delcomments[j:], delcomments[j+1:])
				delcomments = delcomments[:len(delcomments)-1]
				i--
				break
			}
		}
	}

	// Invariant check: every spec removed from Decls must also have been
	// present in (and removed from) f.Imports.
	if len(delspecs) > 0 {
		panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
	}

	return
}
|
||||
|
||||
// RewriteImport rewrites any import of path oldPath to path newPath.
|
||||
func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) {
|
||||
for _, imp := range f.Imports {
|
||||
if importPath(imp) == oldPath {
|
||||
rewrote = true
|
||||
// record old End, because the default is to compute
|
||||
// it using the length of imp.Path.Value.
|
||||
imp.EndPos = imp.End()
|
||||
imp.Path.Value = strconv.Quote(newPath)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// UsesImport reports whether a given import is used.
|
||||
func UsesImport(f *ast.File, path string) (used bool) {
|
||||
spec := importSpec(f, path)
|
||||
if spec == nil {
|
||||
return
|
||||
}
|
||||
|
||||
name := spec.Name.String()
|
||||
switch name {
|
||||
case "<nil>":
|
||||
// If the package name is not explicitly specified,
|
||||
// make an educated guess. This is not guaranteed to be correct.
|
||||
lastSlash := strings.LastIndex(path, "/")
|
||||
if lastSlash == -1 {
|
||||
name = path
|
||||
} else {
|
||||
name = path[lastSlash+1:]
|
||||
}
|
||||
case "_", ".":
|
||||
// Not sure if this import is used - err on the side of caution.
|
||||
return true
|
||||
}
|
||||
|
||||
ast.Walk(visitFn(func(n ast.Node) {
|
||||
sel, ok := n.(*ast.SelectorExpr)
|
||||
if ok && isTopName(sel.X, name) {
|
||||
used = true
|
||||
}
|
||||
}), f)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// visitFn adapts an ordinary function to the ast.Visitor interface.
type visitFn func(node ast.Node)

// Visit calls fn(node) and returns fn itself, so the traversal
// continues into node's children with the same function.
func (fn visitFn) Visit(node ast.Node) ast.Visitor {
	fn(node)
	return fn
}
|
||||
|
||||
// imports reports whether f has an import with the specified name and path.
|
||||
func imports(f *ast.File, name, path string) bool {
|
||||
for _, s := range f.Imports {
|
||||
if importName(s) == name && importPath(s) == path {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// importSpec returns the import spec if f imports path,
|
||||
// or nil otherwise.
|
||||
func importSpec(f *ast.File, path string) *ast.ImportSpec {
|
||||
for _, s := range f.Imports {
|
||||
if importPath(s) == path {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// importName returns the name of s,
|
||||
// or "" if the import is not named.
|
||||
func importName(s *ast.ImportSpec) string {
|
||||
if s.Name == nil {
|
||||
return ""
|
||||
}
|
||||
return s.Name.Name
|
||||
}
|
||||
|
||||
// importPath returns the unquoted import path of s,
|
||||
// or "" if the path is not properly quoted.
|
||||
func importPath(s *ast.ImportSpec) string {
|
||||
t, err := strconv.Unquote(s.Path.Value)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// declImports reports whether gen contains an import of path.
|
||||
func declImports(gen *ast.GenDecl, path string) bool {
|
||||
if gen.Tok != token.IMPORT {
|
||||
return false
|
||||
}
|
||||
for _, spec := range gen.Specs {
|
||||
impspec := spec.(*ast.ImportSpec)
|
||||
if importPath(impspec) == path {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// matchLen returns the length, in path segments, of the longest
// path-segment prefix shared by x and y (i.e. the number of '/'
// separators within the common byte prefix).
func matchLen(x, y string) int {
	limit := len(x)
	if len(y) < limit {
		limit = len(y)
	}
	segments := 0
	for i := 0; i < limit; i++ {
		if x[i] != y[i] {
			break
		}
		if x[i] == '/' {
			segments++
		}
	}
	return segments
}
|
||||
|
||||
// isTopName returns true if n is a top-level unresolved identifier with the given name.
|
||||
func isTopName(n ast.Expr, name string) bool {
|
||||
id, ok := n.(*ast.Ident)
|
||||
return ok && id.Name == name && id.Obj == nil
|
||||
}
|
||||
|
||||
// Imports returns the file imports grouped by paragraph.
|
||||
func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
|
||||
var groups [][]*ast.ImportSpec
|
||||
|
||||
for _, decl := range f.Decls {
|
||||
genDecl, ok := decl.(*ast.GenDecl)
|
||||
if !ok || genDecl.Tok != token.IMPORT {
|
||||
break
|
||||
}
|
||||
|
||||
group := []*ast.ImportSpec{}
|
||||
|
||||
var lastLine int
|
||||
for _, spec := range genDecl.Specs {
|
||||
importSpec := spec.(*ast.ImportSpec)
|
||||
pos := importSpec.Path.ValuePos
|
||||
line := fset.Position(pos).Line
|
||||
if lastLine > 0 && pos > 0 && line-lastLine > 1 {
|
||||
groups = append(groups, group)
|
||||
group = []*ast.ImportSpec{}
|
||||
}
|
||||
group = append(group, importSpec)
|
||||
lastLine = line
|
||||
}
|
||||
groups = append(groups, group)
|
||||
}
|
||||
|
||||
return groups
|
||||
}
|
||||
477
vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
generated
vendored
Normal file
477
vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
generated
vendored
Normal file
@@ -0,0 +1,477 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package astutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// An ApplyFunc is invoked by Apply for each node n, even if n is nil,
// before and/or after the node's children, using a Cursor describing
// the current node and providing operations on it.
//
// The return value of ApplyFunc controls the syntax tree traversal.
// See Apply for details.
type ApplyFunc func(*Cursor) bool
|
||||
|
||||
// Apply traverses a syntax tree recursively, starting with root,
// and calling pre and post for each node as described below.
// Apply returns the syntax tree, possibly modified.
//
// If pre is not nil, it is called for each node before the node's
// children are traversed (pre-order). If pre returns false, no
// children are traversed, and post is not called for that node.
//
// If post is not nil, and a prior call of pre didn't return false,
// post is called for each node after its children are traversed
// (post-order). If post returns false, traversal is terminated and
// Apply returns immediately.
//
// Only fields that refer to AST nodes are considered children;
// i.e., token.Pos, Scopes, Objects, and fields of basic types
// (strings, etc.) are ignored.
//
// Children are traversed in the order in which they appear in the
// respective node's struct definition. A package's files are
// traversed in the filenames' alphabetical order.
//
func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) {
	// Wrap root in a synthetic parent node so that even the root
	// itself can be replaced through the Cursor.
	parent := &struct{ ast.Node }{root}
	defer func() {
		// A post function terminates traversal early by panicking with
		// the abort sentinel; swallow only that panic and re-raise all
		// others. The (possibly replaced) root is read from the wrapper.
		if r := recover(); r != nil && r != abort {
			panic(r)
		}
		result = parent.Node
	}()
	a := &application{pre: pre, post: post}
	a.apply(parent, "Node", nil, root)
	return
}
|
||||
|
||||
var abort = new(int) // singleton, to signal termination of Apply
|
||||
|
||||
// A Cursor describes a node encountered during Apply.
// Information about the node and its parent is available
// from the Node, Parent, Name, and Index methods.
//
// If p is a variable of type and value of the current parent node
// c.Parent(), and f is the field identifier with name c.Name(),
// the following invariants hold:
//
//   p.f            == c.Node()  if c.Index() <  0
//   p.f[c.Index()] == c.Node()  if c.Index() >= 0
//
// The methods Replace, Delete, InsertBefore, and InsertAfter
// can be used to change the AST without disrupting Apply.
type Cursor struct {
	parent ast.Node
	name   string    // name of the parent field containing node
	iter   *iterator // valid if non-nil; set when node is an element of a slice
	node   ast.Node
}
|
||||
|
||||
// Node returns the current Node.
func (c *Cursor) Node() ast.Node { return c.node }
|
||||
|
||||
// Parent returns the parent of the current Node.
func (c *Cursor) Parent() ast.Node { return c.parent }
|
||||
|
||||
// Name returns the name of the parent Node field that contains the current Node.
// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns
// the filename for the current Node.
func (c *Cursor) Name() string { return c.name }
|
||||
|
||||
// Index reports the index >= 0 of the current Node in the slice of Nodes that
|
||||
// contains it, or a value < 0 if the current Node is not part of a slice.
|
||||
// The index of the current node changes if InsertBefore is called while
|
||||
// processing the current node.
|
||||
func (c *Cursor) Index() int {
|
||||
if c.iter != nil {
|
||||
return c.iter.index
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// field returns the current node's parent field value
// (looked up by reflection using the stored field name).
func (c *Cursor) field() reflect.Value {
	return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name)
}
|
||||
|
||||
// Replace replaces the current Node with n.
// The replacement node is not walked by Apply.
func (c *Cursor) Replace(n ast.Node) {
	if _, ok := c.node.(*ast.File); ok {
		// Files live in the package's Files map, not in a struct field,
		// so they need special handling; c.name is the filename key.
		file, ok := n.(*ast.File)
		if !ok {
			panic("attempt to replace *ast.File with non-*ast.File")
		}
		c.parent.(*ast.Package).Files[c.name] = file
		return
	}

	v := c.field()
	if i := c.Index(); i >= 0 {
		// Current node is a slice element; replace in place.
		v = v.Index(i)
	}
	v.Set(reflect.ValueOf(n))
}
|
||||
|
||||
// Delete deletes the current Node from its containing slice.
// If the current Node is not part of a slice, Delete panics.
// As a special case, if the current node is a package file,
// Delete removes it from the package's Files map.
func (c *Cursor) Delete() {
	if _, ok := c.node.(*ast.File); ok {
		delete(c.parent.(*ast.Package).Files, c.name)
		return
	}

	i := c.Index()
	if i < 0 {
		panic("Delete node not contained in slice")
	}
	v := c.field()
	l := v.Len()
	// Shift the tail left by one, zero the vacated last element
	// (so the GC can reclaim it), and shrink the slice.
	reflect.Copy(v.Slice(i, l), v.Slice(i+1, l))
	v.Index(l - 1).Set(reflect.Zero(v.Type().Elem()))
	v.SetLen(l - 1)
	// Compensate the enclosing applyList iteration for the removed element.
	c.iter.step--
}
|
||||
|
||||
// InsertAfter inserts n after the current Node in its containing slice.
// If the current Node is not part of a slice, InsertAfter panics.
// Apply does not walk n.
func (c *Cursor) InsertAfter(n ast.Node) {
	i := c.Index()
	if i < 0 {
		panic("InsertAfter node not contained in slice")
	}
	v := c.field()
	// Grow the slice by one, shift the tail right, and place n at i+1.
	v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
	l := v.Len()
	reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l))
	v.Index(i + 1).Set(reflect.ValueOf(n))
	// Skip the inserted element in the enclosing applyList iteration.
	c.iter.step++
}
|
||||
|
||||
// InsertBefore inserts n before the current Node in its containing slice.
// If the current Node is not part of a slice, InsertBefore panics.
// Apply will not walk n.
func (c *Cursor) InsertBefore(n ast.Node) {
	i := c.Index()
	if i < 0 {
		panic("InsertBefore node not contained in slice")
	}
	v := c.field()
	// Grow the slice by one, shift the tail (including the current
	// element) right, and place n at i.
	v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
	l := v.Len()
	reflect.Copy(v.Slice(i+1, l), v.Slice(i, l))
	v.Index(i).Set(reflect.ValueOf(n))
	// The current node moved one slot to the right.
	c.iter.index++
}
|
||||
|
||||
// application carries all the shared data so we can pass it around cheaply.
type application struct {
	pre, post ApplyFunc
	cursor    Cursor   // reused by apply to avoid per-node allocation
	iter      iterator // reused by applyList to avoid per-list allocation
}
|
||||
|
||||
// apply visits node n (which may be nil): it invokes a.pre, walks n's
// children in declaration order, then invokes a.post, reusing a.cursor
// throughout to avoid allocation.
func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) {
	// convert typed nil into untyped nil
	if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() {
		n = nil
	}

	// avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead
	saved := a.cursor
	a.cursor.parent = parent
	a.cursor.name = name
	a.cursor.iter = iter
	a.cursor.node = n

	if a.pre != nil && !a.pre(&a.cursor) {
		// pre returned false: skip children and post for this node.
		a.cursor = saved
		return
	}

	// walk children
	// (the order of the cases matches the order of the corresponding node types in go/ast)
	switch n := n.(type) {
	case nil:
		// nothing to do

	// Comments and fields
	case *ast.Comment:
		// nothing to do

	case *ast.CommentGroup:
		if n != nil {
			a.applyList(n, "List")
		}

	case *ast.Field:
		a.apply(n, "Doc", nil, n.Doc)
		a.applyList(n, "Names")
		a.apply(n, "Type", nil, n.Type)
		a.apply(n, "Tag", nil, n.Tag)
		a.apply(n, "Comment", nil, n.Comment)

	case *ast.FieldList:
		a.applyList(n, "List")

	// Expressions
	case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
		// nothing to do

	case *ast.Ellipsis:
		a.apply(n, "Elt", nil, n.Elt)

	case *ast.FuncLit:
		a.apply(n, "Type", nil, n.Type)
		a.apply(n, "Body", nil, n.Body)

	case *ast.CompositeLit:
		a.apply(n, "Type", nil, n.Type)
		a.applyList(n, "Elts")

	case *ast.ParenExpr:
		a.apply(n, "X", nil, n.X)

	case *ast.SelectorExpr:
		a.apply(n, "X", nil, n.X)
		a.apply(n, "Sel", nil, n.Sel)

	case *ast.IndexExpr:
		a.apply(n, "X", nil, n.X)
		a.apply(n, "Index", nil, n.Index)

	case *ast.SliceExpr:
		a.apply(n, "X", nil, n.X)
		a.apply(n, "Low", nil, n.Low)
		a.apply(n, "High", nil, n.High)
		a.apply(n, "Max", nil, n.Max)

	case *ast.TypeAssertExpr:
		a.apply(n, "X", nil, n.X)
		a.apply(n, "Type", nil, n.Type)

	case *ast.CallExpr:
		a.apply(n, "Fun", nil, n.Fun)
		a.applyList(n, "Args")

	case *ast.StarExpr:
		a.apply(n, "X", nil, n.X)

	case *ast.UnaryExpr:
		a.apply(n, "X", nil, n.X)

	case *ast.BinaryExpr:
		a.apply(n, "X", nil, n.X)
		a.apply(n, "Y", nil, n.Y)

	case *ast.KeyValueExpr:
		a.apply(n, "Key", nil, n.Key)
		a.apply(n, "Value", nil, n.Value)

	// Types
	case *ast.ArrayType:
		a.apply(n, "Len", nil, n.Len)
		a.apply(n, "Elt", nil, n.Elt)

	case *ast.StructType:
		a.apply(n, "Fields", nil, n.Fields)

	case *ast.FuncType:
		a.apply(n, "Params", nil, n.Params)
		a.apply(n, "Results", nil, n.Results)

	case *ast.InterfaceType:
		a.apply(n, "Methods", nil, n.Methods)

	case *ast.MapType:
		a.apply(n, "Key", nil, n.Key)
		a.apply(n, "Value", nil, n.Value)

	case *ast.ChanType:
		a.apply(n, "Value", nil, n.Value)

	// Statements
	case *ast.BadStmt:
		// nothing to do

	case *ast.DeclStmt:
		a.apply(n, "Decl", nil, n.Decl)

	case *ast.EmptyStmt:
		// nothing to do

	case *ast.LabeledStmt:
		a.apply(n, "Label", nil, n.Label)
		a.apply(n, "Stmt", nil, n.Stmt)

	case *ast.ExprStmt:
		a.apply(n, "X", nil, n.X)

	case *ast.SendStmt:
		a.apply(n, "Chan", nil, n.Chan)
		a.apply(n, "Value", nil, n.Value)

	case *ast.IncDecStmt:
		a.apply(n, "X", nil, n.X)

	case *ast.AssignStmt:
		a.applyList(n, "Lhs")
		a.applyList(n, "Rhs")

	case *ast.GoStmt:
		a.apply(n, "Call", nil, n.Call)

	case *ast.DeferStmt:
		a.apply(n, "Call", nil, n.Call)

	case *ast.ReturnStmt:
		a.applyList(n, "Results")

	case *ast.BranchStmt:
		a.apply(n, "Label", nil, n.Label)

	case *ast.BlockStmt:
		a.applyList(n, "List")

	case *ast.IfStmt:
		a.apply(n, "Init", nil, n.Init)
		a.apply(n, "Cond", nil, n.Cond)
		a.apply(n, "Body", nil, n.Body)
		a.apply(n, "Else", nil, n.Else)

	case *ast.CaseClause:
		a.applyList(n, "List")
		a.applyList(n, "Body")

	case *ast.SwitchStmt:
		a.apply(n, "Init", nil, n.Init)
		a.apply(n, "Tag", nil, n.Tag)
		a.apply(n, "Body", nil, n.Body)

	case *ast.TypeSwitchStmt:
		a.apply(n, "Init", nil, n.Init)
		a.apply(n, "Assign", nil, n.Assign)
		a.apply(n, "Body", nil, n.Body)

	case *ast.CommClause:
		a.apply(n, "Comm", nil, n.Comm)
		a.applyList(n, "Body")

	case *ast.SelectStmt:
		a.apply(n, "Body", nil, n.Body)

	case *ast.ForStmt:
		a.apply(n, "Init", nil, n.Init)
		a.apply(n, "Cond", nil, n.Cond)
		a.apply(n, "Post", nil, n.Post)
		a.apply(n, "Body", nil, n.Body)

	case *ast.RangeStmt:
		a.apply(n, "Key", nil, n.Key)
		a.apply(n, "Value", nil, n.Value)
		a.apply(n, "X", nil, n.X)
		a.apply(n, "Body", nil, n.Body)

	// Declarations
	case *ast.ImportSpec:
		a.apply(n, "Doc", nil, n.Doc)
		a.apply(n, "Name", nil, n.Name)
		a.apply(n, "Path", nil, n.Path)
		a.apply(n, "Comment", nil, n.Comment)

	case *ast.ValueSpec:
		a.apply(n, "Doc", nil, n.Doc)
		a.applyList(n, "Names")
		a.apply(n, "Type", nil, n.Type)
		a.applyList(n, "Values")
		a.apply(n, "Comment", nil, n.Comment)

	case *ast.TypeSpec:
		a.apply(n, "Doc", nil, n.Doc)
		a.apply(n, "Name", nil, n.Name)
		a.apply(n, "Type", nil, n.Type)
		a.apply(n, "Comment", nil, n.Comment)

	case *ast.BadDecl:
		// nothing to do

	case *ast.GenDecl:
		a.apply(n, "Doc", nil, n.Doc)
		a.applyList(n, "Specs")

	case *ast.FuncDecl:
		a.apply(n, "Doc", nil, n.Doc)
		a.apply(n, "Recv", nil, n.Recv)
		a.apply(n, "Name", nil, n.Name)
		a.apply(n, "Type", nil, n.Type)
		a.apply(n, "Body", nil, n.Body)

	// Files and packages
	case *ast.File:
		a.apply(n, "Doc", nil, n.Doc)
		a.apply(n, "Name", nil, n.Name)
		a.applyList(n, "Decls")
		// Don't walk n.Comments; they have either been walked already if
		// they are Doc comments, or they can be easily walked explicitly.

	case *ast.Package:
		// collect and sort names for reproducible behavior
		var names []string
		for name := range n.Files {
			names = append(names, name)
		}
		sort.Strings(names)
		for _, name := range names {
			a.apply(n, name, nil, n.Files[name])
		}

	default:
		panic(fmt.Sprintf("Apply: unexpected node type %T", n))
	}

	if a.post != nil && !a.post(&a.cursor) {
		// post requested termination: unwind via the abort sentinel,
		// which Apply's deferred recover absorbs.
		panic(abort)
	}

	a.cursor = saved
}
|
||||
|
||||
// An iterator controls iteration over a slice of nodes.
type iterator struct {
	// index is the current position in the slice; step is the amount
	// added to index after each element is visited (set to 1 before
	// each apply call, and may be adjusted through the iterator
	// pointer handed to apply — e.g. on insertion/deletion).
	index, step int
}
|
||||
|
||||
// applyList applies a.apply to each element of the slice-valued
// field parent.<name>. The field is re-read via reflection on every
// iteration so that modifications made through the cursor (inserted
// or deleted elements) are observed by the loop.
func (a *application) applyList(parent ast.Node, name string) {
	// avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead
	saved := a.iter
	a.iter.index = 0
	for {
		// must reload parent.name each time, since cursor modifications might change it
		v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name)
		if a.iter.index >= v.Len() {
			break
		}

		// element x may be nil in a bad AST - be cautious
		var x ast.Node
		if e := v.Index(a.iter.index); e.IsValid() {
			x = e.Interface().(ast.Node)
		}

		// step defaults to 1; a.apply receives &a.iter, so it can
		// presumably adjust step/index when the list is edited —
		// NOTE(review): confirm against apply's cursor handling.
		a.iter.step = 1
		a.apply(parent, name, &a.iter, x)
		a.iter.index += a.iter.step
	}
	// Restore the outer iteration state: apply recurses back into
	// applyList for nested lists, so a.iter is shared and must be saved.
	a.iter = saved
}
|
||||
14
vendor/golang.org/x/tools/go/ast/astutil/util.go
generated
vendored
Normal file
14
vendor/golang.org/x/tools/go/ast/astutil/util.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
package astutil
|
||||
|
||||
import "go/ast"
|
||||
|
||||
// Unparen returns e with any enclosing parentheses stripped.
|
||||
func Unparen(e ast.Expr) ast.Expr {
|
||||
for {
|
||||
p, ok := e.(*ast.ParenExpr)
|
||||
if !ok {
|
||||
return e
|
||||
}
|
||||
e = p.X
|
||||
}
|
||||
}
|
||||
198
vendor/golang.org/x/tools/go/buildutil/allpackages.go
generated
vendored
Normal file
198
vendor/golang.org/x/tools/go/buildutil/allpackages.go
generated
vendored
Normal file
@@ -0,0 +1,198 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package buildutil provides utilities related to the go/build
|
||||
// package in the standard library.
|
||||
//
|
||||
// All I/O is done via the build.Context file system interface, which must
|
||||
// be concurrency-safe.
|
||||
package buildutil // import "golang.org/x/tools/go/buildutil"
|
||||
|
||||
import (
|
||||
"go/build"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// AllPackages returns the package path of each Go package in any source
|
||||
// directory of the specified build context (e.g. $GOROOT or an element
|
||||
// of $GOPATH). Errors are ignored. The results are sorted.
|
||||
// All package paths are canonical, and thus may contain "/vendor/".
|
||||
//
|
||||
// The result may include import paths for directories that contain no
|
||||
// *.go files, such as "archive" (in $GOROOT/src).
|
||||
//
|
||||
// All I/O is done via the build.Context file system interface,
|
||||
// which must be concurrency-safe.
|
||||
//
|
||||
func AllPackages(ctxt *build.Context) []string {
|
||||
var list []string
|
||||
ForEachPackage(ctxt, func(pkg string, _ error) {
|
||||
list = append(list, pkg)
|
||||
})
|
||||
sort.Strings(list)
|
||||
return list
|
||||
}
|
||||
|
||||
// ForEachPackage calls the found function with the package path of
|
||||
// each Go package it finds in any source directory of the specified
|
||||
// build context (e.g. $GOROOT or an element of $GOPATH).
|
||||
// All package paths are canonical, and thus may contain "/vendor/".
|
||||
//
|
||||
// If the package directory exists but could not be read, the second
|
||||
// argument to the found function provides the error.
|
||||
//
|
||||
// All I/O is done via the build.Context file system interface,
|
||||
// which must be concurrency-safe.
|
||||
//
|
||||
func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) {
|
||||
ch := make(chan item)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, root := range ctxt.SrcDirs() {
|
||||
root := root
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
allPackages(ctxt, root, ch)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(ch)
|
||||
}()
|
||||
|
||||
// All calls to found occur in the caller's goroutine.
|
||||
for i := range ch {
|
||||
found(i.importPath, i.err)
|
||||
}
|
||||
}
|
||||
|
||||
// item is one result of the parallel package scan: a canonical
// import path, plus any error encountered while reading its directory.
type item struct {
	importPath string
	err        error // (optional)
}

// We use a process-wide counting semaphore to limit
// the number of parallel calls to ReadDir.
var ioLimit = make(chan bool, 20)
|
||||
|
||||
// allPackages walks the directory tree rooted at root (one of
// ctxt.SrcDirs()) and sends an item on ch for each package directory
// found. Subdirectories are walked in concurrently spawned goroutines;
// the function returns only after all of them have completed.
func allPackages(ctxt *build.Context, root string, ch chan<- item) {
	// Normalize root so TrimPrefix below yields a rooted, relative path.
	root = filepath.Clean(root) + string(os.PathSeparator)

	var wg sync.WaitGroup

	var walkDir func(dir string)
	walkDir = func(dir string) {
		// Avoid .foo, _foo, and testdata directory trees.
		base := filepath.Base(dir)
		if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" {
			return
		}

		// Import path of dir, relative to root, in slash form.
		pkg := filepath.ToSlash(strings.TrimPrefix(dir, root))

		// Prune search if we encounter any of these import paths.
		switch pkg {
		case "builtin":
			return
		}

		// ioLimit bounds the number of concurrent ReadDir calls.
		ioLimit <- true
		files, err := ReadDir(ctxt, dir)
		<-ioLimit
		// The root itself (pkg == "") is reported only if reading it failed.
		if pkg != "" || err != nil {
			ch <- item{pkg, err}
		}
		for _, fi := range files {
			fi := fi // per-iteration copy, captured by the goroutine below
			if fi.IsDir() {
				wg.Add(1)
				go func() {
					walkDir(filepath.Join(dir, fi.Name()))
					wg.Done()
				}()
			}
		}
	}

	walkDir(root)
	wg.Wait()
}
|
||||
|
||||
// ExpandPatterns returns the set of packages matched by patterns,
|
||||
// which may have the following forms:
|
||||
//
|
||||
// golang.org/x/tools/cmd/guru # a single package
|
||||
// golang.org/x/tools/... # all packages beneath dir
|
||||
// ... # the entire workspace.
|
||||
//
|
||||
// Order is significant: a pattern preceded by '-' removes matching
|
||||
// packages from the set. For example, these patterns match all encoding
|
||||
// packages except encoding/xml:
|
||||
//
|
||||
// encoding/... -encoding/xml
|
||||
//
|
||||
// A trailing slash in a pattern is ignored. (Path components of Go
|
||||
// package names are separated by slash, not the platform's path separator.)
|
||||
//
|
||||
func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool {
|
||||
// TODO(adonovan): support other features of 'go list':
|
||||
// - "std"/"cmd"/"all" meta-packages
|
||||
// - "..." not at the end of a pattern
|
||||
// - relative patterns using "./" or "../" prefix
|
||||
|
||||
pkgs := make(map[string]bool)
|
||||
doPkg := func(pkg string, neg bool) {
|
||||
if neg {
|
||||
delete(pkgs, pkg)
|
||||
} else {
|
||||
pkgs[pkg] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Scan entire workspace if wildcards are present.
|
||||
// TODO(adonovan): opt: scan only the necessary subtrees of the workspace.
|
||||
var all []string
|
||||
for _, arg := range patterns {
|
||||
if strings.HasSuffix(arg, "...") {
|
||||
all = AllPackages(ctxt)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, arg := range patterns {
|
||||
if arg == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
neg := arg[0] == '-'
|
||||
if neg {
|
||||
arg = arg[1:]
|
||||
}
|
||||
|
||||
if arg == "..." {
|
||||
// ... matches all packages
|
||||
for _, pkg := range all {
|
||||
doPkg(pkg, neg)
|
||||
}
|
||||
} else if dir := strings.TrimSuffix(arg, "/..."); dir != arg {
|
||||
// dir/... matches all packages beneath dir
|
||||
for _, pkg := range all {
|
||||
if strings.HasPrefix(pkg, dir) &&
|
||||
(len(pkg) == len(dir) || pkg[len(dir)] == '/') {
|
||||
doPkg(pkg, neg)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// single package
|
||||
doPkg(strings.TrimSuffix(arg, "/"), neg)
|
||||
}
|
||||
}
|
||||
|
||||
return pkgs
|
||||
}
|
||||
109
vendor/golang.org/x/tools/go/buildutil/fakecontext.go
generated
vendored
Normal file
109
vendor/golang.org/x/tools/go/buildutil/fakecontext.go
generated
vendored
Normal file
@@ -0,0 +1,109 @@
|
||||
package buildutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FakeContext returns a build.Context for the fake file tree specified
// by pkgs, which maps package import paths to a mapping from file base
// names to contents.
//
// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides
// the necessary file access methods to read from memory instead of the
// real file system.
//
// Unlike a real file tree, the fake one has only two levels---packages
// and files---so ReadDir("/go/src/") returns all packages under
// /go/src/ including, for instance, "math" and "math/big".
// ReadDir("/go/src/math/big") would return all the files in the
// "math/big" package.
//
func FakeContext(pkgs map[string]map[string]string) *build.Context {
	// clean maps a filename to the pkgs key it corresponds to:
	// slash form, with the "/go/src/" prefix removed.
	clean := func(filename string) string {
		f := path.Clean(filepath.ToSlash(filename))
		// Removing "/go/src" while respecting segment
		// boundaries has this unfortunate corner case:
		if f == "/go/src" {
			return ""
		}
		return strings.TrimPrefix(f, "/go/src/")
	}

	ctxt := build.Default // copy
	ctxt.GOROOT = "/go"
	ctxt.GOPATH = ""
	ctxt.Compiler = "gc"
	ctxt.IsDir = func(dir string) bool {
		dir = clean(dir)
		if dir == "" {
			return true // needed by (*build.Context).SrcDirs
		}
		return pkgs[dir] != nil
	}
	ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) {
		dir = clean(dir)
		var fis []os.FileInfo
		if dir == "" {
			// enumerate packages
			for importPath := range pkgs {
				fis = append(fis, fakeDirInfo(importPath))
			}
		} else {
			// enumerate files of package
			for basename := range pkgs[dir] {
				fis = append(fis, fakeFileInfo(basename))
			}
		}
		// Map iteration order is random; sort for determinism.
		sort.Sort(byName(fis))
		return fis, nil
	}
	ctxt.OpenFile = func(filename string) (io.ReadCloser, error) {
		filename = clean(filename)
		dir, base := path.Split(filename)
		content, ok := pkgs[path.Clean(dir)][base]
		if !ok {
			return nil, fmt.Errorf("file not found: %s", filename)
		}
		return ioutil.NopCloser(strings.NewReader(content)), nil
	}
	ctxt.IsAbsPath = func(path string) bool {
		path = filepath.ToSlash(path)
		// Don't rely on the default (filepath.Path) since on
		// Windows, it reports virtual paths as non-absolute.
		return strings.HasPrefix(path, "/")
	}
	return &ctxt
}
|
||||
|
||||
// byName sorts a []os.FileInfo lexicographically by file name
// (implements sort.Interface).
type byName []os.FileInfo

func (s byName) Len() int           { return len(s) }
func (s byName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
|
||||
|
||||
// fakeFileInfo is an os.FileInfo for an in-memory file: the string is
// the base name; size, mode, and mtime are fixed placeholder values.
type fakeFileInfo string

func (fi fakeFileInfo) Name() string    { return string(fi) }
func (fakeFileInfo) Sys() interface{}   { return nil }
func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
func (fakeFileInfo) IsDir() bool        { return false }
func (fakeFileInfo) Size() int64        { return 0 }
func (fakeFileInfo) Mode() os.FileMode  { return 0644 }
||||
|
||||
// fakeDirInfo is an os.FileInfo for an in-memory directory: the string
// is the base name; IsDir reports true and other fields are placeholders.
type fakeDirInfo string

func (fd fakeDirInfo) Name() string    { return string(fd) }
func (fakeDirInfo) Sys() interface{}   { return nil }
func (fakeDirInfo) ModTime() time.Time { return time.Time{} }
func (fakeDirInfo) IsDir() bool        { return true }
func (fakeDirInfo) Size() int64        { return 0 }
func (fakeDirInfo) Mode() os.FileMode  { return 0755 }
|
||||
103
vendor/golang.org/x/tools/go/buildutil/overlay.go
generated
vendored
Normal file
103
vendor/golang.org/x/tools/go/buildutil/overlay.go
generated
vendored
Normal file
@@ -0,0 +1,103 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package buildutil
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// OverlayContext overlays a build.Context with additional files from
|
||||
// a map. Files in the map take precedence over other files.
|
||||
//
|
||||
// In addition to plain string comparison, two file names are
|
||||
// considered equal if their base names match and their directory
|
||||
// components point at the same directory on the file system. That is,
|
||||
// symbolic links are followed for directories, but not files.
|
||||
//
|
||||
// A common use case for OverlayContext is to allow editors to pass in
|
||||
// a set of unsaved, modified files.
|
||||
//
|
||||
// Currently, only the Context.OpenFile function will respect the
|
||||
// overlay. This may change in the future.
|
||||
func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context {
|
||||
// TODO(dominikh): Implement IsDir, HasSubdir and ReadDir
|
||||
|
||||
rc := func(data []byte) (io.ReadCloser, error) {
|
||||
return ioutil.NopCloser(bytes.NewBuffer(data)), nil
|
||||
}
|
||||
|
||||
copy := *orig // make a copy
|
||||
ctxt := ©
|
||||
ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
|
||||
// Fast path: names match exactly.
|
||||
if content, ok := overlay[path]; ok {
|
||||
return rc(content)
|
||||
}
|
||||
|
||||
// Slow path: check for same file under a different
|
||||
// alias, perhaps due to a symbolic link.
|
||||
for filename, content := range overlay {
|
||||
if sameFile(path, filename) {
|
||||
return rc(content)
|
||||
}
|
||||
}
|
||||
|
||||
return OpenFile(orig, path)
|
||||
}
|
||||
return ctxt
|
||||
}
|
||||
|
||||
// ParseOverlayArchive parses an archive containing Go files and their
|
||||
// contents. The result is intended to be used with OverlayContext.
|
||||
//
|
||||
//
|
||||
// Archive format
|
||||
//
|
||||
// The archive consists of a series of files. Each file consists of a
|
||||
// name, a decimal file size and the file contents, separated by
|
||||
// newlines. No newline follows after the file contents.
|
||||
func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) {
|
||||
overlay := make(map[string][]byte)
|
||||
r := bufio.NewReader(archive)
|
||||
for {
|
||||
// Read file name.
|
||||
filename, err := r.ReadString('\n')
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break // OK
|
||||
}
|
||||
return nil, fmt.Errorf("reading archive file name: %v", err)
|
||||
}
|
||||
filename = filepath.Clean(strings.TrimSpace(filename))
|
||||
|
||||
// Read file size.
|
||||
sz, err := r.ReadString('\n')
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err)
|
||||
}
|
||||
sz = strings.TrimSpace(sz)
|
||||
size, err := strconv.ParseUint(sz, 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err)
|
||||
}
|
||||
|
||||
// Read file content.
|
||||
content := make([]byte, size)
|
||||
if _, err := io.ReadFull(r, content); err != nil {
|
||||
return nil, fmt.Errorf("reading archive file %s: %v", filename, err)
|
||||
}
|
||||
overlay[filename] = content
|
||||
}
|
||||
|
||||
return overlay, nil
|
||||
}
|
||||
75
vendor/golang.org/x/tools/go/buildutil/tags.go
generated
vendored
Normal file
75
vendor/golang.org/x/tools/go/buildutil/tags.go
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
package buildutil
|
||||
|
||||
// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go.
|
||||
|
||||
import "fmt"
|
||||
|
||||
const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " +
	"For more information about build tags, see the description of " +
	"build constraints in the documentation for the go/build package"

// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses
// a flag value in the same manner as go build's -tags flag and
// populates a []string slice.
//
// See $GOROOT/src/go/build/doc.go for description of build tags.
// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag.
//
// Example:
// 	flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
type TagsFlag []string

// Set implements flag.Value by splitting s into whitespace-separated,
// optionally quoted fields. On a parse error, *v is set to an empty
// (non-nil) slice and the error is returned.
func (v *TagsFlag) Set(s string) error {
	var err error
	*v, err = splitQuotedFields(s)
	if *v == nil {
		*v = []string{}
	}
	return err
}

// Get implements flag.Getter, returning the accumulated tag list.
func (v *TagsFlag) Get() interface{} { return *v }
|
||||
|
||||
func splitQuotedFields(s string) ([]string, error) {
|
||||
// Split fields allowing '' or "" around elements.
|
||||
// Quotes further inside the string do not count.
|
||||
var f []string
|
||||
for len(s) > 0 {
|
||||
for len(s) > 0 && isSpaceByte(s[0]) {
|
||||
s = s[1:]
|
||||
}
|
||||
if len(s) == 0 {
|
||||
break
|
||||
}
|
||||
// Accepted quoted string. No unescaping inside.
|
||||
if s[0] == '"' || s[0] == '\'' {
|
||||
quote := s[0]
|
||||
s = s[1:]
|
||||
i := 0
|
||||
for i < len(s) && s[i] != quote {
|
||||
i++
|
||||
}
|
||||
if i >= len(s) {
|
||||
return nil, fmt.Errorf("unterminated %c string", quote)
|
||||
}
|
||||
f = append(f, s[:i])
|
||||
s = s[i+1:]
|
||||
continue
|
||||
}
|
||||
i := 0
|
||||
for i < len(s) && !isSpaceByte(s[i]) {
|
||||
i++
|
||||
}
|
||||
f = append(f, s[:i])
|
||||
s = s[i:]
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// String implements flag.Value. It returns a fixed placeholder rather
// than rendering the current tag list.
func (v *TagsFlag) String() string {
	return "<tagsFlag>"
}
|
||||
|
||||
// isSpaceByte reports whether c is one of the ASCII whitespace bytes
// recognized by splitQuotedFields: space, tab, newline, carriage return.
func isSpaceByte(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r':
		return true
	default:
		return false
	}
}
|
||||
212
vendor/golang.org/x/tools/go/buildutil/util.go
generated
vendored
Normal file
212
vendor/golang.org/x/tools/go/buildutil/util.go
generated
vendored
Normal file
@@ -0,0 +1,212 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package buildutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ParseFile behaves like parser.ParseFile,
|
||||
// but uses the build context's file system interface, if any.
|
||||
//
|
||||
// If file is not absolute (as defined by IsAbsPath), the (dir, file)
|
||||
// components are joined using JoinPath; dir must be absolute.
|
||||
//
|
||||
// The displayPath function, if provided, is used to transform the
|
||||
// filename that will be attached to the ASTs.
|
||||
//
|
||||
// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws.
|
||||
//
|
||||
func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) {
|
||||
if !IsAbsPath(ctxt, file) {
|
||||
file = JoinPath(ctxt, dir, file)
|
||||
}
|
||||
rd, err := OpenFile(ctxt, file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rd.Close() // ignore error
|
||||
if displayPath != nil {
|
||||
file = displayPath(file)
|
||||
}
|
||||
return parser.ParseFile(fset, file, rd, mode)
|
||||
}
|
||||
|
||||
// ContainingPackage returns the package containing filename.
//
// If filename is not absolute, it is interpreted relative to working directory dir.
// All I/O is via the build context's file system interface, if any.
//
// The '...Files []string' fields of the resulting build.Package are not
// populated (build.FindOnly mode).
//
func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) {
	if !IsAbsPath(ctxt, filename) {
		filename = JoinPath(ctxt, dir, filename)
	}

	// We must not assume the file tree uses
	// "/" always,
	// `\` always,
	// or os.PathSeparator (which varies by platform),
	// but to make any progress, we are forced to assume that
	// paths will not use `\` unless the PathSeparator
	// is also `\`, thus we can rely on filepath.ToSlash for some sanity.

	// Trailing "/" lets HasSubdir treat the directory itself as a subdir.
	dirSlash := path.Dir(filepath.ToSlash(filename)) + "/"

	// We assume that no source root (GOPATH[i] or GOROOT) contains any other.
	for _, srcdir := range ctxt.SrcDirs() {
		srcdirSlash := filepath.ToSlash(srcdir) + "/"
		if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok {
			return ctxt.Import(importPath, dir, build.FindOnly)
		}
	}

	return nil, fmt.Errorf("can't find package containing %s", filename)
}
|
||||
|
||||
// -- Effective methods of file system interface -------------------------
|
||||
|
||||
// (go/build.Context defines these as methods, but does not export them.)
|
||||
|
||||
// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
|
||||
// the local file system to answer the question.
|
||||
func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) {
|
||||
if f := ctxt.HasSubdir; f != nil {
|
||||
return f(root, dir)
|
||||
}
|
||||
|
||||
// Try using paths we received.
|
||||
if rel, ok = hasSubdir(root, dir); ok {
|
||||
return
|
||||
}
|
||||
|
||||
// Try expanding symlinks and comparing
|
||||
// expanded against unexpanded and
|
||||
// expanded against expanded.
|
||||
rootSym, _ := filepath.EvalSymlinks(root)
|
||||
dirSym, _ := filepath.EvalSymlinks(dir)
|
||||
|
||||
if rel, ok = hasSubdir(rootSym, dir); ok {
|
||||
return
|
||||
}
|
||||
if rel, ok = hasSubdir(root, dirSym); ok {
|
||||
return
|
||||
}
|
||||
return hasSubdir(rootSym, dirSym)
|
||||
}
|
||||
|
||||
// hasSubdir reports whether dir lies within root on the local file
// system (no symlink expansion), returning the slash-separated
// relative path if so.
func hasSubdir(root, dir string) (rel string, ok bool) {
	sep := string(filepath.Separator)
	prefix := filepath.Clean(root)
	if !strings.HasSuffix(prefix, sep) {
		prefix += sep
	}

	cleaned := filepath.Clean(dir)
	if !strings.HasPrefix(cleaned, prefix) {
		return "", false
	}

	return filepath.ToSlash(cleaned[len(prefix):]), true
}
|
||||
|
||||
// FileExists returns true if the specified file exists,
|
||||
// using the build context's file system interface.
|
||||
func FileExists(ctxt *build.Context, path string) bool {
|
||||
if ctxt.OpenFile != nil {
|
||||
r, err := ctxt.OpenFile(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
r.Close() // ignore error
|
||||
return true
|
||||
}
|
||||
_, err := os.Stat(path)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// OpenFile behaves like os.Open,
|
||||
// but uses the build context's file system interface, if any.
|
||||
func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) {
|
||||
if ctxt.OpenFile != nil {
|
||||
return ctxt.OpenFile(path)
|
||||
}
|
||||
return os.Open(path)
|
||||
}
|
||||
|
||||
// IsAbsPath behaves like filepath.IsAbs,
|
||||
// but uses the build context's file system interface, if any.
|
||||
func IsAbsPath(ctxt *build.Context, path string) bool {
|
||||
if ctxt.IsAbsPath != nil {
|
||||
return ctxt.IsAbsPath(path)
|
||||
}
|
||||
return filepath.IsAbs(path)
|
||||
}
|
||||
|
||||
// JoinPath behaves like filepath.Join,
|
||||
// but uses the build context's file system interface, if any.
|
||||
func JoinPath(ctxt *build.Context, path ...string) string {
|
||||
if ctxt.JoinPath != nil {
|
||||
return ctxt.JoinPath(path...)
|
||||
}
|
||||
return filepath.Join(path...)
|
||||
}
|
||||
|
||||
// IsDir behaves like os.Stat plus IsDir,
|
||||
// but uses the build context's file system interface, if any.
|
||||
func IsDir(ctxt *build.Context, path string) bool {
|
||||
if ctxt.IsDir != nil {
|
||||
return ctxt.IsDir(path)
|
||||
}
|
||||
fi, err := os.Stat(path)
|
||||
return err == nil && fi.IsDir()
|
||||
}
|
||||
|
||||
// ReadDir behaves like ioutil.ReadDir,
|
||||
// but uses the build context's file system interface, if any.
|
||||
func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) {
|
||||
if ctxt.ReadDir != nil {
|
||||
return ctxt.ReadDir(path)
|
||||
}
|
||||
return ioutil.ReadDir(path)
|
||||
}
|
||||
|
||||
// SplitPathList behaves like filepath.SplitList,
|
||||
// but uses the build context's file system interface, if any.
|
||||
func SplitPathList(ctxt *build.Context, s string) []string {
|
||||
if ctxt.SplitPathList != nil {
|
||||
return ctxt.SplitPathList(s)
|
||||
}
|
||||
return filepath.SplitList(s)
|
||||
}
|
||||
|
||||
// sameFile returns true if x and y have the same basename and denote
// the same file.
//
func sameFile(x, y string) bool {
	if path.Clean(x) == path.Clean(y) {
		return true
	}
	// Cheap rejection before hitting the file system: distinct
	// basenames can never be the same file.
	if filepath.Base(x) != filepath.Base(y) {
		return false
	}
	xi, err := os.Stat(x)
	if err != nil {
		return false
	}
	yi, err := os.Stat(y)
	if err != nil {
		return false
	}
	return os.SameFile(xi, yi)
}
|
||||
129
vendor/golang.org/x/tools/go/callgraph/callgraph.go
generated
vendored
Normal file
129
vendor/golang.org/x/tools/go/callgraph/callgraph.go
generated
vendored
Normal file
@@ -0,0 +1,129 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
|
||||
Package callgraph defines the call graph and various algorithms
|
||||
and utilities to operate on it.
|
||||
|
||||
A call graph is a labelled directed graph whose nodes represent
|
||||
functions and whose edge labels represent syntactic function call
|
||||
sites. The presence of a labelled edge (caller, site, callee)
|
||||
indicates that caller may call callee at the specified call site.
|
||||
|
||||
A call graph is a multigraph: it may contain multiple edges (caller,
|
||||
*, callee) connecting the same pair of nodes, so long as the edges
|
||||
differ by label; this occurs when one function calls another function
|
||||
from multiple call sites. Also, it may contain multiple edges
|
||||
(caller, site, *) that differ only by callee; this indicates a
|
||||
polymorphic call.
|
||||
|
||||
A SOUND call graph is one that overapproximates the dynamic calling
|
||||
behaviors of the program in all possible executions. One call graph
|
||||
is more PRECISE than another if it is a smaller overapproximation of
|
||||
the dynamic behavior.
|
||||
|
||||
All call graphs have a synthetic root node which is responsible for
|
||||
calling main() and init().
|
||||
|
||||
Calls to built-in functions (e.g. panic, println) are not represented
|
||||
in the call graph; they are treated like built-in operators of the
|
||||
language.
|
||||
|
||||
*/
|
||||
package callgraph // import "golang.org/x/tools/go/callgraph"
|
||||
|
||||
// TODO(adonovan): add a function to eliminate wrappers from the
|
||||
// callgraph, preserving topology.
|
||||
// More generally, we could eliminate "uninteresting" nodes such as
|
||||
// nodes from packages we don't care about.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
|
||||
"golang.org/x/tools/go/ssa"
|
||||
)
|
||||
|
||||
// A Graph represents a call graph.
//
// A graph may contain nodes that are not reachable from the root.
// If the call graph is sound, such nodes indicate unreachable
// functions.
//
type Graph struct {
	Root  *Node                   // the distinguished root node
	Nodes map[*ssa.Function]*Node // all nodes by function
}
|
||||
|
||||
// New returns a new Graph with the specified root node.
|
||||
func New(root *ssa.Function) *Graph {
|
||||
g := &Graph{Nodes: make(map[*ssa.Function]*Node)}
|
||||
g.Root = g.CreateNode(root)
|
||||
return g
|
||||
}
|
||||
|
||||
// CreateNode returns the Node for fn, creating it if not present.
|
||||
func (g *Graph) CreateNode(fn *ssa.Function) *Node {
|
||||
n, ok := g.Nodes[fn]
|
||||
if !ok {
|
||||
n = &Node{Func: fn, ID: len(g.Nodes)}
|
||||
g.Nodes[fn] = n
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// A Node represents a node in a call graph.
type Node struct {
	Func *ssa.Function // the function this node represents
	ID   int           // 0-based sequence number
	In   []*Edge       // unordered set of incoming call edges (n.In[*].Callee == n)
	Out  []*Edge       // unordered set of outgoing call edges (n.Out[*].Caller == n)
}
|
||||
|
||||
// String returns a short identifier for the node in the form
// "n<ID>:<function>".
func (n *Node) String() string {
	return fmt.Sprintf("n%d:%s", n.ID, n.Func)
}
|
||||
|
||||
// An Edge represents an edge in the call graph.
//
// Site is nil for edges originating in synthetic or intrinsic
// functions, e.g. reflect.Call or the root of the call graph.
type Edge struct {
	Caller *Node               // calling node
	Site   ssa.CallInstruction // call site, if known
	Callee *Node               // called node
}
|
||||
|
||||
// String renders the edge as "caller --> callee".
func (e Edge) String() string {
	return fmt.Sprintf("%s --> %s", e.Caller, e.Callee)
}
|
||||
|
||||
func (e Edge) Description() string {
|
||||
var prefix string
|
||||
switch e.Site.(type) {
|
||||
case nil:
|
||||
return "synthetic call"
|
||||
case *ssa.Go:
|
||||
prefix = "concurrent "
|
||||
case *ssa.Defer:
|
||||
prefix = "deferred "
|
||||
}
|
||||
return prefix + e.Site.Common().Description()
|
||||
}
|
||||
|
||||
func (e Edge) Pos() token.Pos {
|
||||
if e.Site == nil {
|
||||
return token.NoPos
|
||||
}
|
||||
return e.Site.Pos()
|
||||
}
|
||||
|
||||
// AddEdge adds the edge (caller, site, callee) to the call graph.
|
||||
// Elimination of duplicate edges is the caller's responsibility.
|
||||
func AddEdge(caller *Node, site ssa.CallInstruction, callee *Node) {
|
||||
e := &Edge{caller, site, callee}
|
||||
callee.In = append(callee.In, e)
|
||||
caller.Out = append(caller.Out, e)
|
||||
}
|
||||
139
vendor/golang.org/x/tools/go/callgraph/cha/cha.go
generated
vendored
Normal file
139
vendor/golang.org/x/tools/go/callgraph/cha/cha.go
generated
vendored
Normal file
@@ -0,0 +1,139 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cha computes the call graph of a Go program using the Class
|
||||
// Hierarchy Analysis (CHA) algorithm.
|
||||
//
|
||||
// CHA was first described in "Optimization of Object-Oriented Programs
|
||||
// Using Static Class Hierarchy Analysis", Jeffrey Dean, David Grove,
|
||||
// and Craig Chambers, ECOOP'95.
|
||||
//
|
||||
// CHA is related to RTA (see go/callgraph/rta); the difference is that
|
||||
// CHA conservatively computes the entire "implements" relation between
|
||||
// interfaces and concrete types ahead of time, whereas RTA uses dynamic
|
||||
// programming to construct it on the fly as it encounters new functions
|
||||
// reachable from main. CHA may thus include spurious call edges for
|
||||
// types that haven't been instantiated yet, or types that are never
|
||||
// instantiated.
|
||||
//
|
||||
// Since CHA conservatively assumes that all functions are address-taken
|
||||
// and all concrete types are put into interfaces, it is sound to run on
|
||||
// partial programs, such as libraries without a main or test function.
|
||||
//
|
||||
package cha // import "golang.org/x/tools/go/callgraph/cha"
|
||||
|
||||
import (
|
||||
"go/types"
|
||||
|
||||
"golang.org/x/tools/go/callgraph"
|
||||
"golang.org/x/tools/go/ssa"
|
||||
"golang.org/x/tools/go/ssa/ssautil"
|
||||
"golang.org/x/tools/go/types/typeutil"
|
||||
)
|
||||
|
||||
// CallGraph computes the call graph of the specified program using the
// Class Hierarchy Analysis algorithm.
//
func CallGraph(prog *ssa.Program) *callgraph.Graph {
	cg := callgraph.New(nil) // TODO(adonovan) eliminate concept of rooted callgraph

	allFuncs := ssautil.AllFunctions(prog)

	// funcsBySig contains all functions, keyed by signature.  It is
	// the effective set of address-taken functions used to resolve
	// a dynamic call of a particular signature.
	var funcsBySig typeutil.Map // value is []*ssa.Function

	// methodsByName contains all methods,
	// grouped by name for efficient lookup.
	// (methodsById would be better but not every SSA method has a go/types ID.)
	methodsByName := make(map[string][]*ssa.Function)

	// An imethod represents an interface method I.m.
	// (There's no go/types object for it;
	// a *types.Func may be shared by many interfaces due to interface embedding.)
	type imethod struct {
		I  *types.Interface
		id string
	}
	// methodsMemo records, for every abstract method call I.m on
	// interface type I, the set of concrete methods C.m of all
	// types C that satisfy interface I.
	//
	// Abstract methods may be shared by several interfaces,
	// hence we must pass I explicitly, not guess from m.
	//
	// methodsMemo is just a cache, so it needn't be a typeutil.Map.
	methodsMemo := make(map[imethod][]*ssa.Function)
	lookupMethods := func(I *types.Interface, m *types.Func) []*ssa.Function {
		id := m.Id()
		methods, ok := methodsMemo[imethod{I, id}]
		if !ok {
			// Not memoized: scan all methods with the right name and
			// keep those whose receiver type satisfies I.
			for _, f := range methodsByName[m.Name()] {
				C := f.Signature.Recv().Type() // named or *named
				if types.Implements(C, I) {
					methods = append(methods, f)
				}
			}
			methodsMemo[imethod{I, id}] = methods
		}
		return methods
	}

	// Partition all functions into the two index structures above.
	for f := range allFuncs {
		if f.Signature.Recv() == nil {
			// Package initializers can never be address-taken.
			if f.Name() == "init" && f.Synthetic == "package initializer" {
				continue
			}
			funcs, _ := funcsBySig.At(f.Signature).([]*ssa.Function)
			funcs = append(funcs, f)
			funcsBySig.Set(f.Signature, funcs)
		} else {
			methodsByName[f.Name()] = append(methodsByName[f.Name()], f)
		}
	}

	addEdge := func(fnode *callgraph.Node, site ssa.CallInstruction, g *ssa.Function) {
		gnode := cg.CreateNode(g)
		callgraph.AddEdge(fnode, site, gnode)
	}

	addEdges := func(fnode *callgraph.Node, site ssa.CallInstruction, callees []*ssa.Function) {
		// Because every call to a highly polymorphic and
		// frequently used abstract method such as
		// (io.Writer).Write is assumed to call every concrete
		// Write method in the program, the call graph can
		// contain a lot of duplication.
		//
		// TODO(adonovan): opt: consider factoring the callgraph
		// API so that the Callers component of each edge is a
		// slice of nodes, not a singleton.
		for _, g := range callees {
			addEdge(fnode, site, g)
		}
	}

	// For each call site in the program, add edges to every
	// possible callee under CHA's conservative assumptions.
	for f := range allFuncs {
		fnode := cg.CreateNode(f)
		for _, b := range f.Blocks {
			for _, instr := range b.Instrs {
				if site, ok := instr.(ssa.CallInstruction); ok {
					call := site.Common()
					if call.IsInvoke() {
						// Dynamic interface call: all concrete methods
						// of all types implementing the interface.
						tiface := call.Value.Type().Underlying().(*types.Interface)
						addEdges(fnode, site, lookupMethods(tiface, call.Method))
					} else if g := call.StaticCallee(); g != nil {
						// Static call: a single known callee.
						addEdge(fnode, site, g)
					} else if _, ok := call.Value.(*ssa.Builtin); !ok {
						// Dynamic call through a function value:
						// all functions of the same signature.
						callees, _ := funcsBySig.At(call.Signature()).([]*ssa.Function)
						addEdges(fnode, site, callees)
					}
				}
			}
		}
	}

	return cg
}
|
||||
459
vendor/golang.org/x/tools/go/callgraph/rta/rta.go
generated
vendored
Normal file
459
vendor/golang.org/x/tools/go/callgraph/rta/rta.go
generated
vendored
Normal file
@@ -0,0 +1,459 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This package provides Rapid Type Analysis (RTA) for Go, a fast
|
||||
// algorithm for call graph construction and discovery of reachable code
|
||||
// (and hence dead code) and runtime types. The algorithm was first
|
||||
// described in:
|
||||
//
|
||||
// David F. Bacon and Peter F. Sweeney. 1996.
|
||||
// Fast static analysis of C++ virtual function calls. (OOPSLA '96)
|
||||
// http://doi.acm.org/10.1145/236337.236371
|
||||
//
|
||||
// The algorithm uses dynamic programming to tabulate the cross-product
|
||||
// of the set of known "address taken" functions with the set of known
|
||||
// dynamic calls of the same type. As each new address-taken function
|
||||
// is discovered, call graph edges are added from each known callsite,
|
||||
// and as each new call site is discovered, call graph edges are added
|
||||
// from it to each known address-taken function.
|
||||
//
|
||||
// A similar approach is used for dynamic calls via interfaces: it
|
||||
// tabulates the cross-product of the set of known "runtime types",
|
||||
// i.e. types that may appear in an interface value, or be derived from
|
||||
// one via reflection, with the set of known "invoke"-mode dynamic
|
||||
// calls. As each new "runtime type" is discovered, call edges are
|
||||
// added from the known call sites, and as each new call site is
|
||||
// discovered, call graph edges are added to each compatible
|
||||
// method.
|
||||
//
|
||||
// In addition, we must consider all exported methods of any runtime type
|
||||
// as reachable, since they may be called via reflection.
|
||||
//
|
||||
// Each time a newly added call edge causes a new function to become
|
||||
// reachable, the code of that function is analyzed for more call sites,
|
||||
// address-taken functions, and runtime types. The process continues
|
||||
// until a fixed point is achieved.
|
||||
//
|
||||
// The resulting call graph is less precise than one produced by pointer
|
||||
// analysis, but the algorithm is much faster. For example, running the
|
||||
// cmd/callgraph tool on its own source takes ~2.1s for RTA and ~5.4s
|
||||
// for points-to analysis.
|
||||
//
|
||||
package rta // import "golang.org/x/tools/go/callgraph/rta"
|
||||
|
||||
// TODO(adonovan): test it by connecting it to the interpreter and
|
||||
// replacing all "unreachable" functions by a special intrinsic, and
|
||||
// ensure that that intrinsic is never called.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/types"
|
||||
|
||||
"golang.org/x/tools/go/callgraph"
|
||||
"golang.org/x/tools/go/ssa"
|
||||
"golang.org/x/tools/go/types/typeutil"
|
||||
)
|
||||
|
||||
// A Result holds the results of Rapid Type Analysis, which includes the
// set of reachable functions/methods, runtime types, and the call graph.
//
type Result struct {
	// CallGraph is the discovered callgraph.
	// It does not include edges for calls made via reflection.
	// It is nil unless Analyze was asked to build one.
	CallGraph *callgraph.Graph

	// Reachable contains the set of reachable functions and methods.
	// This includes exported methods of runtime types, since
	// they may be accessed via reflection.
	// The value indicates whether the function is address-taken.
	//
	// (We wrap the bool in a struct to avoid inadvertent use of
	// "if Reachable[f] {" to test for set membership.)
	Reachable map[*ssa.Function]struct{ AddrTaken bool }

	// RuntimeTypes contains the set of types that are needed at
	// runtime, for interfaces or reflection.
	//
	// The value indicates whether the type is inaccessible to reflection.
	// Consider:
	// 	type A struct{B}
	// 	fmt.Println(new(A))
	// Types *A, A and B are accessible to reflection, but the unnamed
	// type struct{B} is not.
	RuntimeTypes typeutil.Map
}
|
||||
|
||||
// Working state of the RTA algorithm.
type rta struct {
	result *Result // accumulated outputs (reachable set, runtime types, graph)

	prog *ssa.Program // the program under analysis

	worklist []*ssa.Function // list of functions to visit

	// addrTakenFuncsBySig contains all address-taken *Functions, grouped by signature.
	// Keys are *types.Signature, values are map[*ssa.Function]bool sets.
	addrTakenFuncsBySig typeutil.Map

	// dynCallSites contains all dynamic "call"-mode call sites, grouped by signature.
	// Keys are *types.Signature, values are unordered []ssa.CallInstruction.
	dynCallSites typeutil.Map

	// invokeSites contains all "invoke"-mode call sites, grouped by interface.
	// Keys are *types.Interface (never *types.Named),
	// Values are unordered []ssa.CallInstruction sets.
	invokeSites typeutil.Map

	// The following two maps together define the subset of the
	// m:n "implements" relation needed by the algorithm.

	// concreteTypes maps each concrete type to the set of interfaces that it implements.
	// Keys are types.Type, values are unordered []*types.Interface.
	// Only concrete types used as MakeInterface operands are included.
	concreteTypes typeutil.Map

	// interfaceTypes maps each interface type to
	// the set of concrete types that implement it.
	// Keys are *types.Interface, values are unordered []types.Type.
	// Only interfaces used in "invoke"-mode CallInstructions are included.
	interfaceTypes typeutil.Map
}
|
||||
|
||||
// addReachable marks a function as potentially callable at run-time,
|
||||
// and ensures that it gets processed.
|
||||
func (r *rta) addReachable(f *ssa.Function, addrTaken bool) {
|
||||
reachable := r.result.Reachable
|
||||
n := len(reachable)
|
||||
v := reachable[f]
|
||||
if addrTaken {
|
||||
v.AddrTaken = true
|
||||
}
|
||||
reachable[f] = v
|
||||
if len(reachable) > n {
|
||||
// First time seeing f. Add it to the worklist.
|
||||
r.worklist = append(r.worklist, f)
|
||||
}
|
||||
}
|
||||
|
||||
// addEdge adds the specified call graph edge, and marks it reachable.
|
||||
// addrTaken indicates whether to mark the callee as "address-taken".
|
||||
func (r *rta) addEdge(site ssa.CallInstruction, callee *ssa.Function, addrTaken bool) {
|
||||
r.addReachable(callee, addrTaken)
|
||||
|
||||
if g := r.result.CallGraph; g != nil {
|
||||
if site.Parent() == nil {
|
||||
panic(site)
|
||||
}
|
||||
from := g.CreateNode(site.Parent())
|
||||
to := g.CreateNode(callee)
|
||||
callgraph.AddEdge(from, site, to)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------- addrTakenFuncs × dynCallSites ----------
|
||||
|
||||
// visitAddrTakenFunc is called each time we encounter an address-taken function f.
|
||||
func (r *rta) visitAddrTakenFunc(f *ssa.Function) {
|
||||
// Create two-level map (Signature -> Function -> bool).
|
||||
S := f.Signature
|
||||
funcs, _ := r.addrTakenFuncsBySig.At(S).(map[*ssa.Function]bool)
|
||||
if funcs == nil {
|
||||
funcs = make(map[*ssa.Function]bool)
|
||||
r.addrTakenFuncsBySig.Set(S, funcs)
|
||||
}
|
||||
if !funcs[f] {
|
||||
// First time seeing f.
|
||||
funcs[f] = true
|
||||
|
||||
// If we've seen any dyncalls of this type, mark it reachable,
|
||||
// and add call graph edges.
|
||||
sites, _ := r.dynCallSites.At(S).([]ssa.CallInstruction)
|
||||
for _, site := range sites {
|
||||
r.addEdge(site, f, true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// visitDynCall is called each time we encounter a dynamic "call"-mode call.
|
||||
func (r *rta) visitDynCall(site ssa.CallInstruction) {
|
||||
S := site.Common().Signature()
|
||||
|
||||
// Record the call site.
|
||||
sites, _ := r.dynCallSites.At(S).([]ssa.CallInstruction)
|
||||
r.dynCallSites.Set(S, append(sites, site))
|
||||
|
||||
// For each function of signature S that we know is address-taken,
|
||||
// add an edge and mark it reachable.
|
||||
funcs, _ := r.addrTakenFuncsBySig.At(S).(map[*ssa.Function]bool)
|
||||
for g := range funcs {
|
||||
r.addEdge(site, g, true)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------- concrete types × invoke sites ----------
|
||||
|
||||
// addInvokeEdge is called for each new pair (site, C) in the matrix.
|
||||
func (r *rta) addInvokeEdge(site ssa.CallInstruction, C types.Type) {
|
||||
// Ascertain the concrete method of C to be called.
|
||||
imethod := site.Common().Method
|
||||
cmethod := r.prog.MethodValue(r.prog.MethodSets.MethodSet(C).Lookup(imethod.Pkg(), imethod.Name()))
|
||||
r.addEdge(site, cmethod, true)
|
||||
}
|
||||
|
||||
// visitInvoke is called each time the algorithm encounters an "invoke"-mode call.
|
||||
func (r *rta) visitInvoke(site ssa.CallInstruction) {
|
||||
I := site.Common().Value.Type().Underlying().(*types.Interface)
|
||||
|
||||
// Record the invoke site.
|
||||
sites, _ := r.invokeSites.At(I).([]ssa.CallInstruction)
|
||||
r.invokeSites.Set(I, append(sites, site))
|
||||
|
||||
// Add callgraph edge for each existing
|
||||
// address-taken concrete type implementing I.
|
||||
for _, C := range r.implementations(I) {
|
||||
r.addInvokeEdge(site, C)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------- main algorithm ----------
|
||||
|
||||
// visitFunc processes function f.
//
// It records each call site (static, dynamic, or invoke-mode), each
// address-taken function operand, and each MakeInterface operand type,
// feeding the cross-product machinery above.
func (r *rta) visitFunc(f *ssa.Function) {
	var space [32]*ssa.Value // preallocate space for common case

	for _, b := range f.Blocks {
		for _, instr := range b.Instrs {
			rands := instr.Operands(space[:0])

			switch instr := instr.(type) {
			case ssa.CallInstruction:
				call := instr.Common()
				if call.IsInvoke() {
					r.visitInvoke(instr)
				} else if g := call.StaticCallee(); g != nil {
					r.addEdge(instr, g, false)
				} else if _, ok := call.Value.(*ssa.Builtin); !ok {
					r.visitDynCall(instr)
				}

				// Ignore the call-position operand when
				// looking for address-taken Functions.
				// Hack: assume this is rands[0].
				rands = rands[1:]

			case *ssa.MakeInterface:
				// The operand's type may now appear as the dynamic
				// type of an interface value.
				r.addRuntimeType(instr.X.Type(), false)
			}

			// Process all address-taken functions.
			for _, op := range rands {
				if g, ok := (*op).(*ssa.Function); ok {
					r.visitAddrTakenFunc(g)
				}
			}
		}
	}
}
|
||||
|
||||
// Analyze performs Rapid Type Analysis, starting at the specified root
// functions.  It returns nil if no roots were specified.
//
// If buildCallGraph is true, Result.CallGraph will contain a call
// graph; otherwise, only the other fields (reachable functions) are
// populated.
//
func Analyze(roots []*ssa.Function, buildCallGraph bool) *Result {
	if len(roots) == 0 {
		return nil
	}

	r := &rta{
		result: &Result{Reachable: make(map[*ssa.Function]struct{ AddrTaken bool })},
		prog:   roots[0].Prog,
	}

	if buildCallGraph {
		// TODO(adonovan): change callgraph API to eliminate the
		// notion of a distinguished root node.  Some callgraphs
		// have many roots, or none.
		r.result.CallGraph = callgraph.New(roots[0])
	}

	// All the typeutil.Maps share one hasher so that equal types
	// hash identically across maps.
	hasher := typeutil.MakeHasher()
	r.result.RuntimeTypes.SetHasher(hasher)
	r.addrTakenFuncsBySig.SetHasher(hasher)
	r.dynCallSites.SetHasher(hasher)
	r.invokeSites.SetHasher(hasher)
	r.concreteTypes.SetHasher(hasher)
	r.interfaceTypes.SetHasher(hasher)

	// Visit functions, processing their instructions, and adding
	// new functions to the worklist, until a fixed point is
	// reached.
	var shadow []*ssa.Function // for efficiency, we double-buffer the worklist
	r.worklist = append(r.worklist, roots...)
	for len(r.worklist) > 0 {
		shadow, r.worklist = r.worklist, shadow[:0]
		for _, f := range shadow {
			r.visitFunc(f)
		}
	}
	return r.result
}
|
||||
|
||||
// interfaces(C) returns all currently known interfaces implemented by C.
//
// As a side effect it adds C to the interfaceTypes entry of every
// interface it implements, and memoizes the result in concreteTypes.
func (r *rta) interfaces(C types.Type) []*types.Interface {
	// Ascertain set of interfaces C implements
	// and update 'implements' relation.
	var ifaces []*types.Interface
	r.interfaceTypes.Iterate(func(I types.Type, concs interface{}) {
		// NB: inner I and concs deliberately shadow the typed
		// callback parameters.
		if I := I.(*types.Interface); types.Implements(C, I) {
			concs, _ := concs.([]types.Type)
			r.interfaceTypes.Set(I, append(concs, C))
			ifaces = append(ifaces, I)
		}
	})
	r.concreteTypes.Set(C, ifaces)
	return ifaces
}
|
||||
|
||||
// implementations(I) returns all currently known concrete types that implement I.
//
// The first lookup of each interface scans all known concrete types
// and caches the result in interfaceTypes; later lookups are O(1).
func (r *rta) implementations(I *types.Interface) []types.Type {
	var concs []types.Type
	if v := r.interfaceTypes.At(I); v != nil {
		concs = v.([]types.Type)
	} else {
		// First time seeing this interface.
		// Update the 'implements' relation.
		r.concreteTypes.Iterate(func(C types.Type, ifaces interface{}) {
			if types.Implements(C, I) {
				// NB: inner ifaces deliberately shadows the parameter.
				ifaces, _ := ifaces.([]*types.Interface)
				r.concreteTypes.Set(C, append(ifaces, I))
				concs = append(concs, C)
			}
		})
		r.interfaceTypes.Set(I, concs)
	}
	return concs
}
|
||||
|
||||
// addRuntimeType is called for each concrete type that can be the
// dynamic type of some interface or reflect.Value.
// Adapted from needMethods in go/ssa/builder.go
//
// skip indicates that T itself is inaccessible to reflection and only
// its components need be visited (see Result.RuntimeTypes).
func (r *rta) addRuntimeType(T types.Type, skip bool) {
	// Memoization: if T is already recorded, at most upgrade its
	// skip flag and stop; otherwise record it and recurse.
	if prev, ok := r.result.RuntimeTypes.At(T).(bool); ok {
		if skip && !prev {
			r.result.RuntimeTypes.Set(T, skip)
		}
		return
	}
	r.result.RuntimeTypes.Set(T, skip)

	mset := r.prog.MethodSets.MethodSet(T)

	if _, ok := T.Underlying().(*types.Interface); !ok {
		// T is a new concrete type.
		for i, n := 0, mset.Len(); i < n; i++ {
			sel := mset.At(i)
			m := sel.Obj()

			if m.Exported() {
				// Exported methods are always potentially callable via reflection.
				r.addReachable(r.prog.MethodValue(sel), true)
			}
		}

		// Add callgraph edge for each existing dynamic
		// "invoke"-mode call via that interface.
		for _, I := range r.interfaces(T) {
			sites, _ := r.invokeSites.At(I).([]ssa.CallInstruction)
			for _, site := range sites {
				r.addInvokeEdge(site, T)
			}
		}
	}

	// Precondition: T is not a method signature (*Signature with Recv()!=nil).
	// Recursive case: skip => don't call makeMethods(T).
	// Each package maintains its own set of types it has visited.

	var n *types.Named
	switch T := T.(type) {
	case *types.Named:
		n = T
	case *types.Pointer:
		n, _ = T.Elem().(*types.Named)
	}
	if n != nil {
		owner := n.Obj().Pkg()
		if owner == nil {
			return // built-in error type
		}
	}

	// Recursion over signatures of each exported method.
	for i := 0; i < mset.Len(); i++ {
		if mset.At(i).Obj().Exported() {
			sig := mset.At(i).Type().(*types.Signature)
			r.addRuntimeType(sig.Params(), true)  // skip the Tuple itself
			r.addRuntimeType(sig.Results(), true) // skip the Tuple itself
		}
	}

	// Recursion over the structure of T.
	switch t := T.(type) {
	case *types.Basic:
		// nop

	case *types.Interface:
		// nop---handled by recursion over method set.

	case *types.Pointer:
		r.addRuntimeType(t.Elem(), false)

	case *types.Slice:
		r.addRuntimeType(t.Elem(), false)

	case *types.Chan:
		r.addRuntimeType(t.Elem(), false)

	case *types.Map:
		r.addRuntimeType(t.Key(), false)
		r.addRuntimeType(t.Elem(), false)

	case *types.Signature:
		if t.Recv() != nil {
			panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv()))
		}
		r.addRuntimeType(t.Params(), true)  // skip the Tuple itself
		r.addRuntimeType(t.Results(), true) // skip the Tuple itself

	case *types.Named:
		// A pointer-to-named type can be derived from a named
		// type via reflection.  It may have methods too.
		r.addRuntimeType(types.NewPointer(T), false)

		// Consider 'type T struct{S}' where S has methods.
		// Reflection provides no way to get from T to struct{S},
		// only to S, so the method set of struct{S} is unwanted,
		// so set 'skip' flag during recursion.
		r.addRuntimeType(t.Underlying(), true)

	case *types.Array:
		r.addRuntimeType(t.Elem(), false)

	case *types.Struct:
		for i, n := 0, t.NumFields(); i < n; i++ {
			r.addRuntimeType(t.Field(i).Type(), false)
		}

	case *types.Tuple:
		for i, n := 0, t.Len(); i < n; i++ {
			r.addRuntimeType(t.At(i).Type(), false)
		}

	default:
		panic(T)
	}
}
|
||||
35
vendor/golang.org/x/tools/go/callgraph/static/static.go
generated
vendored
Normal file
35
vendor/golang.org/x/tools/go/callgraph/static/static.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
// Package static computes the call graph of a Go program containing
|
||||
// only static call edges.
|
||||
package static // import "golang.org/x/tools/go/callgraph/static"
|
||||
|
||||
import (
|
||||
"golang.org/x/tools/go/callgraph"
|
||||
"golang.org/x/tools/go/ssa"
|
||||
"golang.org/x/tools/go/ssa/ssautil"
|
||||
)
|
||||
|
||||
// CallGraph computes the call graph of the specified program
|
||||
// considering only static calls.
|
||||
//
|
||||
func CallGraph(prog *ssa.Program) *callgraph.Graph {
|
||||
cg := callgraph.New(nil) // TODO(adonovan) eliminate concept of rooted callgraph
|
||||
|
||||
// TODO(adonovan): opt: use only a single pass over the ssa.Program.
|
||||
// TODO(adonovan): opt: this is slower than RTA (perhaps because
|
||||
// the lower precision means so many edges are allocated)!
|
||||
for f := range ssautil.AllFunctions(prog) {
|
||||
fnode := cg.CreateNode(f)
|
||||
for _, b := range f.Blocks {
|
||||
for _, instr := range b.Instrs {
|
||||
if site, ok := instr.(ssa.CallInstruction); ok {
|
||||
if g := site.Common().StaticCallee(); g != nil {
|
||||
gnode := cg.CreateNode(g)
|
||||
callgraph.AddEdge(fnode, site, gnode)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return cg
|
||||
}
|
||||
181
vendor/golang.org/x/tools/go/callgraph/util.go
generated
vendored
Normal file
181
vendor/golang.org/x/tools/go/callgraph/util.go
generated
vendored
Normal file
@@ -0,0 +1,181 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package callgraph
|
||||
|
||||
import "golang.org/x/tools/go/ssa"
|
||||
|
||||
// This file provides various utilities over call graphs, such as
|
||||
// visitation and path search.
|
||||
|
||||
// CalleesOf returns a new set containing all direct callees of the
|
||||
// caller node.
|
||||
//
|
||||
func CalleesOf(caller *Node) map[*Node]bool {
|
||||
callees := make(map[*Node]bool)
|
||||
for _, e := range caller.Out {
|
||||
callees[e.Callee] = true
|
||||
}
|
||||
return callees
|
||||
}
|
||||
|
||||
// GraphVisitEdges visits all the edges in graph g in depth-first order.
|
||||
// The edge function is called for each edge in postorder. If it
|
||||
// returns non-nil, visitation stops and GraphVisitEdges returns that
|
||||
// value.
|
||||
//
|
||||
func GraphVisitEdges(g *Graph, edge func(*Edge) error) error {
|
||||
seen := make(map[*Node]bool)
|
||||
var visit func(n *Node) error
|
||||
visit = func(n *Node) error {
|
||||
if !seen[n] {
|
||||
seen[n] = true
|
||||
for _, e := range n.Out {
|
||||
if err := visit(e.Callee); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := edge(e); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
for _, n := range g.Nodes {
|
||||
if err := visit(n); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PathSearch finds an arbitrary path starting at node start and
|
||||
// ending at some node for which isEnd() returns true. On success,
|
||||
// PathSearch returns the path as an ordered list of edges; on
|
||||
// failure, it returns nil.
|
||||
//
|
||||
func PathSearch(start *Node, isEnd func(*Node) bool) []*Edge {
|
||||
stack := make([]*Edge, 0, 32)
|
||||
seen := make(map[*Node]bool)
|
||||
var search func(n *Node) []*Edge
|
||||
search = func(n *Node) []*Edge {
|
||||
if !seen[n] {
|
||||
seen[n] = true
|
||||
if isEnd(n) {
|
||||
return stack
|
||||
}
|
||||
for _, e := range n.Out {
|
||||
stack = append(stack, e) // push
|
||||
if found := search(e.Callee); found != nil {
|
||||
return found
|
||||
}
|
||||
stack = stack[:len(stack)-1] // pop
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return search(start)
|
||||
}
|
||||
|
||||
// DeleteSyntheticNodes removes from call graph g all nodes for
// synthetic functions (except g.Root and package initializers),
// preserving the topology.  In effect, calls to synthetic wrappers
// are "inlined".
//
func (g *Graph) DeleteSyntheticNodes() {
	// Measurements on the standard library and go.tools show that
	// resulting graph has ~15% fewer nodes and 4-8% fewer edges
	// than the input.
	//
	// Inlining a wrapper of in-degree m, out-degree n adds m*n
	// and removes m+n edges.  Since most wrappers are monomorphic
	// (n=1) this results in a slight reduction.  Polymorphic
	// wrappers (n>1), e.g. from embedding an interface value
	// inside a struct to satisfy some interface, cause an
	// increase in the graph, but they seem to be uncommon.

	// Hash all existing edges to avoid creating duplicates.
	edges := make(map[Edge]bool)
	for _, cgn := range g.Nodes {
		for _, e := range cgn.Out {
			edges[*e] = true
		}
	}
	// NB: deletion during map iteration is permitted in Go;
	// deleted entries are simply not visited later.
	for fn, cgn := range g.Nodes {
		if cgn == g.Root || fn.Synthetic == "" || isInit(cgn.Func) {
			continue // keep
		}
		// Splice each caller of cgn directly to each of its callees.
		for _, eIn := range cgn.In {
			for _, eOut := range cgn.Out {
				newEdge := Edge{eIn.Caller, eIn.Site, eOut.Callee}
				if edges[newEdge] {
					continue // don't add duplicate
				}
				AddEdge(eIn.Caller, eIn.Site, eOut.Callee)
				edges[newEdge] = true
			}
		}
		g.DeleteNode(cgn)
	}
}
|
||||
|
||||
func isInit(fn *ssa.Function) bool {
|
||||
return fn.Pkg != nil && fn.Pkg.Func("init") == fn
|
||||
}
|
||||
|
||||
// DeleteNode removes node n and its edges from the graph g.
// (NB: not efficient for batch deletion.)
func (g *Graph) DeleteNode(n *Node) {
	// Detach n from both adjacency directions before removing it
	// from the node map, so no dangling edges remain.
	n.deleteIns()
	n.deleteOuts()
	delete(g.Nodes, n.Func)
}
|
||||
|
||||
// deleteIns deletes all incoming edges to n.
//
// Each incoming edge is also removed from its caller's Out list to
// keep the two adjacency lists consistent.
func (n *Node) deleteIns() {
	for _, e := range n.In {
		removeOutEdge(e)
	}
	n.In = nil
}
|
||||
|
||||
// deleteOuts deletes all outgoing edges from n.
//
// Each outgoing edge is also removed from its callee's In list to
// keep the two adjacency lists consistent.
func (n *Node) deleteOuts() {
	for _, e := range n.Out {
		removeInEdge(e)
	}
	n.Out = nil
}
|
||||
|
||||
// removeOutEdge removes edge.Caller's outgoing edge 'edge'.
|
||||
func removeOutEdge(edge *Edge) {
|
||||
caller := edge.Caller
|
||||
n := len(caller.Out)
|
||||
for i, e := range caller.Out {
|
||||
if e == edge {
|
||||
// Replace it with the final element and shrink the slice.
|
||||
caller.Out[i] = caller.Out[n-1]
|
||||
caller.Out[n-1] = nil // aid GC
|
||||
caller.Out = caller.Out[:n-1]
|
||||
return
|
||||
}
|
||||
}
|
||||
panic("edge not found: " + edge.String())
|
||||
}
|
||||
|
||||
// removeInEdge removes edge.Callee's incoming edge 'edge'.
|
||||
func removeInEdge(edge *Edge) {
|
||||
caller := edge.Callee
|
||||
n := len(caller.In)
|
||||
for i, e := range caller.In {
|
||||
if e == edge {
|
||||
// Replace it with the final element and shrink the slice.
|
||||
caller.In[i] = caller.In[n-1]
|
||||
caller.In[n-1] = nil // aid GC
|
||||
caller.In = caller.In[:n-1]
|
||||
return
|
||||
}
|
||||
}
|
||||
panic("edge not found: " + edge.String())
|
||||
}
|
||||
220
vendor/golang.org/x/tools/go/internal/cgo/cgo.go
generated
vendored
Normal file
220
vendor/golang.org/x/tools/go/internal/cgo/cgo.go
generated
vendored
Normal file
@@ -0,0 +1,220 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cgo
|
||||
|
||||
// This file handles cgo preprocessing of files containing `import "C"`.
|
||||
//
|
||||
// DESIGN
|
||||
//
|
||||
// The approach taken is to run the cgo processor on the package's
|
||||
// CgoFiles and parse the output, faking the filenames of the
|
||||
// resulting ASTs so that the synthetic file containing the C types is
|
||||
// called "C" (e.g. "~/go/src/net/C") and the preprocessed files
|
||||
// have their original names (e.g. "~/go/src/net/cgo_unix.go"),
|
||||
// not the names of the actual temporary files.
|
||||
//
|
||||
// The advantage of this approach is its fidelity to 'go build'. The
|
||||
// downside is that the token.Position.Offset for each AST node is
|
||||
// incorrect, being an offset within the temporary file. Line numbers
|
||||
// should still be correct because of the //line comments.
|
||||
//
|
||||
// The logic of this file is mostly plundered from the 'go build'
|
||||
// tool, which also invokes the cgo preprocessor.
|
||||
//
|
||||
//
|
||||
// REJECTED ALTERNATIVE
|
||||
//
|
||||
// An alternative approach that we explored is to extend go/types'
|
||||
// Importer mechanism to provide the identity of the importing package
|
||||
// so that each time `import "C"` appears it resolves to a different
|
||||
// synthetic package containing just the objects needed in that case.
|
||||
// The loader would invoke cgo but parse only the cgo_types.go file
|
||||
// defining the package-level objects, discarding the other files
|
||||
// resulting from preprocessing.
|
||||
//
|
||||
// The benefit of this approach would have been that source-level
|
||||
// syntax information would correspond exactly to the original cgo
|
||||
// file, with no preprocessing involved, making source tools like
|
||||
// godoc, guru, and eg happy. However, the approach was rejected
|
||||
// due to the additional complexity it would impose on go/types. (It
|
||||
// made for a beautiful demo, though.)
|
||||
//
|
||||
// cgo files, despite their *.go extension, are not legal Go source
|
||||
// files per the specification since they may refer to unexported
|
||||
// members of package "C" such as C.int. Also, a function such as
|
||||
// C.getpwent has in effect two types, one matching its C type and one
|
||||
// which additionally returns (errno C.int). The cgo preprocessor
|
||||
// uses name mangling to distinguish these two functions in the
|
||||
// processed code, but go/types would need to duplicate this logic in
|
||||
// its handling of function calls, analogous to the treatment of map
|
||||
// lookups in which y=m[k] and y,ok=m[k] are both legal.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses
|
||||
// the output and returns the resulting ASTs.
|
||||
//
|
||||
func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
|
||||
tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
pkgdir := bp.Dir
|
||||
if DisplayPath != nil {
|
||||
pkgdir = DisplayPath(pkgdir)
|
||||
}
|
||||
|
||||
cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var files []*ast.File
|
||||
for i := range cgoFiles {
|
||||
rd, err := os.Open(cgoFiles[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
display := filepath.Join(bp.Dir, cgoDisplayFiles[i])
|
||||
f, err := parser.ParseFile(fset, display, rd, mode)
|
||||
rd.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files = append(files, f)
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
var cgoRe = regexp.MustCompile(`[/\\:]`)
|
||||
|
||||
// Run invokes the cgo preprocessor on bp.CgoFiles and returns two
|
||||
// lists of files: the resulting processed files (in temporary
|
||||
// directory tmpdir) and the corresponding names of the unprocessed files.
|
||||
//
|
||||
// Run is adapted from (*builder).cgo in
|
||||
// $GOROOT/src/cmd/go/build.go, but these features are unsupported:
|
||||
// Objective C, CGOPKGPATH, CGO_FLAGS.
|
||||
//
|
||||
// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in
|
||||
// to the cgo preprocessor. This in turn will set the // line comments
|
||||
// referring to those files to use absolute paths. This is needed for
|
||||
// go/packages using the legacy go list support so it is able to find
|
||||
// the original files.
|
||||
func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) {
|
||||
cgoCPPFLAGS, _, _, _ := cflags(bp, true)
|
||||
_, cgoexeCFLAGS, _, _ := cflags(bp, false)
|
||||
|
||||
if len(bp.CgoPkgConfig) > 0 {
|
||||
pcCFLAGS, err := pkgConfigFlags(bp)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
|
||||
}
|
||||
|
||||
// Allows including _cgo_export.h from .[ch] files in the package.
|
||||
cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir)
|
||||
|
||||
// _cgo_gotypes.go (displayed "C") contains the type definitions.
|
||||
files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go"))
|
||||
displayFiles = append(displayFiles, "C")
|
||||
for _, fn := range bp.CgoFiles {
|
||||
// "foo.cgo1.go" (displayed "foo.go") is the processed Go source.
|
||||
f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_")
|
||||
files = append(files, filepath.Join(tmpdir, f+"cgo1.go"))
|
||||
displayFiles = append(displayFiles, fn)
|
||||
}
|
||||
|
||||
var cgoflags []string
|
||||
if bp.Goroot && bp.ImportPath == "runtime/cgo" {
|
||||
cgoflags = append(cgoflags, "-import_runtime_cgo=false")
|
||||
}
|
||||
if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" {
|
||||
cgoflags = append(cgoflags, "-import_syscall=false")
|
||||
}
|
||||
|
||||
var cgoFiles []string = bp.CgoFiles
|
||||
if useabs {
|
||||
cgoFiles = make([]string, len(bp.CgoFiles))
|
||||
for i := range cgoFiles {
|
||||
cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i])
|
||||
}
|
||||
}
|
||||
|
||||
args := stringList(
|
||||
"go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--",
|
||||
cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles,
|
||||
)
|
||||
if false {
|
||||
log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir)
|
||||
}
|
||||
cmd := exec.Command(args[0], args[1:]...)
|
||||
cmd.Dir = pkgdir
|
||||
cmd.Stdout = os.Stderr
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err)
|
||||
}
|
||||
|
||||
return files, displayFiles, nil
|
||||
}
|
||||
|
||||
// -- unmodified from 'go build' ---------------------------------------
|
||||
|
||||
// Return the flags to use when invoking the C or C++ compilers, or cgo.
|
||||
func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
|
||||
var defaults string
|
||||
if def {
|
||||
defaults = "-g -O2"
|
||||
}
|
||||
|
||||
cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
|
||||
cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
|
||||
cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
|
||||
ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
|
||||
return
|
||||
}
|
||||
|
||||
// envList returns the value of the given environment variable broken
|
||||
// into fields, using the default value when the variable is empty.
|
||||
func envList(key, def string) []string {
|
||||
v := os.Getenv(key)
|
||||
if v == "" {
|
||||
v = def
|
||||
}
|
||||
return strings.Fields(v)
|
||||
}
|
||||
|
||||
// stringList's arguments should be a sequence of string or []string values.
|
||||
// stringList flattens them into a single []string.
|
||||
func stringList(args ...interface{}) []string {
|
||||
var x []string
|
||||
for _, arg := range args {
|
||||
switch arg := arg.(type) {
|
||||
case []string:
|
||||
x = append(x, arg...)
|
||||
case string:
|
||||
x = append(x, arg)
|
||||
default:
|
||||
panic("stringList: invalid argument")
|
||||
}
|
||||
}
|
||||
return x
|
||||
}
|
||||
39
vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
generated
vendored
Normal file
39
vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cgo
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints.
|
||||
func pkgConfig(mode string, pkgs []string) (flags []string, err error) {
|
||||
cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err)
|
||||
if len(out) > 0 {
|
||||
s = fmt.Sprintf("%s: %s", s, out)
|
||||
}
|
||||
return nil, errors.New(s)
|
||||
}
|
||||
if len(out) > 0 {
|
||||
flags = strings.Fields(string(out))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// pkgConfigFlags calls pkg-config if needed and returns the cflags
|
||||
// needed to build the package.
|
||||
func pkgConfigFlags(p *build.Package) (cflags []string, err error) {
|
||||
if len(p.CgoPkgConfig) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
return pkgConfig("--cflags", p.CgoPkgConfig)
|
||||
}
|
||||
17
vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
generated
vendored
17
vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
generated
vendored
@@ -81,13 +81,13 @@ func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, u
|
||||
args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"}
|
||||
args = append(args, buildFlags...)
|
||||
args = append(args, "--", "unsafe")
|
||||
stdout, err := InvokeGo(ctx, env, dir, usesExportData, args...)
|
||||
stdout, stderr, err := invokeGo(ctx, env, dir, usesExportData, args...)
|
||||
var goarch, compiler string
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "cannot find main module") {
|
||||
// User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc.
|
||||
// TODO(matloob): Is this a problem in practice?
|
||||
envout, enverr := InvokeGo(ctx, env, dir, usesExportData, "env", "GOARCH")
|
||||
envout, _, enverr := invokeGo(ctx, env, dir, usesExportData, "env", "GOARCH")
|
||||
if enverr != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -99,7 +99,8 @@ func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, u
|
||||
} else {
|
||||
fields := strings.Fields(stdout.String())
|
||||
if len(fields) < 2 {
|
||||
return nil, fmt.Errorf("could not determine GOARCH and Go compiler")
|
||||
return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\" from stdout of go command:\n%s\ndir: %s\nstdout: <<%s>>\nstderr: <<%s>>",
|
||||
cmdDebugStr(env, args...), dir, stdout.String(), stderr.String())
|
||||
}
|
||||
goarch = fields[0]
|
||||
compiler = fields[1]
|
||||
@@ -107,8 +108,8 @@ func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, u
|
||||
return types.SizesFor(compiler, goarch), nil
|
||||
}
|
||||
|
||||
// InvokeGo returns the stdout of a go command invocation.
|
||||
func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, error) {
|
||||
// invokeGo returns the stdout and stderr of a go command invocation.
|
||||
func invokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, *bytes.Buffer, error) {
|
||||
if debug {
|
||||
defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now())
|
||||
}
|
||||
@@ -131,7 +132,7 @@ func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool
|
||||
// Catastrophic error:
|
||||
// - executable not found
|
||||
// - context cancellation
|
||||
return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
|
||||
return nil, nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
|
||||
}
|
||||
|
||||
// Export mode entails a build.
|
||||
@@ -139,7 +140,7 @@ func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool
|
||||
// (despite the -e flag) and the Export field is blank.
|
||||
// Do not fail in that case.
|
||||
if !usesExportData {
|
||||
return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
|
||||
return nil, nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -158,7 +159,7 @@ func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool
|
||||
fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout)
|
||||
}
|
||||
|
||||
return stdout, nil
|
||||
return stdout, stderr, nil
|
||||
}
|
||||
|
||||
func cmdDebugStr(envlist []string, args ...string) string {
|
||||
|
||||
204
vendor/golang.org/x/tools/go/loader/doc.go
generated
vendored
Normal file
204
vendor/golang.org/x/tools/go/loader/doc.go
generated
vendored
Normal file
@@ -0,0 +1,204 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package loader loads a complete Go program from source code, parsing
|
||||
// and type-checking the initial packages plus their transitive closure
|
||||
// of dependencies. The ASTs and the derived facts are retained for
|
||||
// later use.
|
||||
//
|
||||
// Deprecated: This is an older API and does not have support
|
||||
// for modules. Use golang.org/x/tools/go/packages instead.
|
||||
//
|
||||
// The package defines two primary types: Config, which specifies a
|
||||
// set of initial packages to load and various other options; and
|
||||
// Program, which is the result of successfully loading the packages
|
||||
// specified by a configuration.
|
||||
//
|
||||
// The configuration can be set directly, but *Config provides various
|
||||
// convenience methods to simplify the common cases, each of which can
|
||||
// be called any number of times. Finally, these are followed by a
|
||||
// call to Load() to actually load and type-check the program.
|
||||
//
|
||||
// var conf loader.Config
|
||||
//
|
||||
// // Use the command-line arguments to specify
|
||||
// // a set of initial packages to load from source.
|
||||
// // See FromArgsUsage for help.
|
||||
// rest, err := conf.FromArgs(os.Args[1:], wantTests)
|
||||
//
|
||||
// // Parse the specified files and create an ad hoc package with path "foo".
|
||||
// // All files must have the same 'package' declaration.
|
||||
// conf.CreateFromFilenames("foo", "foo.go", "bar.go")
|
||||
//
|
||||
// // Create an ad hoc package with path "foo" from
|
||||
// // the specified already-parsed files.
|
||||
// // All ASTs must have the same 'package' declaration.
|
||||
// conf.CreateFromFiles("foo", parsedFiles)
|
||||
//
|
||||
// // Add "runtime" to the set of packages to be loaded.
|
||||
// conf.Import("runtime")
|
||||
//
|
||||
// // Adds "fmt" and "fmt_test" to the set of packages
|
||||
// // to be loaded. "fmt" will include *_test.go files.
|
||||
// conf.ImportWithTests("fmt")
|
||||
//
|
||||
// // Finally, load all the packages specified by the configuration.
|
||||
// prog, err := conf.Load()
|
||||
//
|
||||
// See examples_test.go for examples of API usage.
|
||||
//
|
||||
//
|
||||
// CONCEPTS AND TERMINOLOGY
|
||||
//
|
||||
// The WORKSPACE is the set of packages accessible to the loader. The
|
||||
// workspace is defined by Config.Build, a *build.Context. The
|
||||
// default context treats subdirectories of $GOROOT and $GOPATH as
|
||||
// packages, but this behavior may be overridden.
|
||||
//
|
||||
// An AD HOC package is one specified as a set of source files on the
|
||||
// command line. In the simplest case, it may consist of a single file
|
||||
// such as $GOROOT/src/net/http/triv.go.
|
||||
//
|
||||
// EXTERNAL TEST packages are those comprised of a set of *_test.go
|
||||
// files all with the same 'package foo_test' declaration, all in the
|
||||
// same directory. (go/build.Package calls these files XTestFiles.)
|
||||
//
|
||||
// An IMPORTABLE package is one that can be referred to by some import
|
||||
// spec. Every importable package is uniquely identified by its
|
||||
// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json",
|
||||
// or "cmd/vendor/golang.org/x/arch/x86/x86asm". A package path
|
||||
// typically denotes a subdirectory of the workspace.
|
||||
//
|
||||
// An import declaration uses an IMPORT PATH to refer to a package.
|
||||
// Most import declarations use the package path as the import path.
|
||||
//
|
||||
// Due to VENDORING (https://golang.org/s/go15vendor), the
|
||||
// interpretation of an import path may depend on the directory in which
|
||||
// it appears. To resolve an import path to a package path, go/build
|
||||
// must search the enclosing directories for a subdirectory named
|
||||
// "vendor".
|
||||
//
|
||||
// ad hoc packages and external test packages are NON-IMPORTABLE. The
|
||||
// path of an ad hoc package is inferred from the package
|
||||
// declarations of its files and is therefore not a unique package key.
|
||||
// For example, Config.CreatePkgs may specify two initial ad hoc
|
||||
// packages, both with path "main".
|
||||
//
|
||||
// An AUGMENTED package is an importable package P plus all the
|
||||
// *_test.go files with same 'package foo' declaration as P.
|
||||
// (go/build.Package calls these files TestFiles.)
|
||||
//
|
||||
// The INITIAL packages are those specified in the configuration. A
|
||||
// DEPENDENCY is a package loaded to satisfy an import in an initial
|
||||
// package or another dependency.
|
||||
//
|
||||
package loader
|
||||
|
||||
// IMPLEMENTATION NOTES
|
||||
//
|
||||
// 'go test', in-package test files, and import cycles
|
||||
// ---------------------------------------------------
|
||||
//
|
||||
// An external test package may depend upon members of the augmented
|
||||
// package that are not in the unaugmented package, such as functions
|
||||
// that expose internals. (See bufio/export_test.go for an example.)
|
||||
// So, the loader must ensure that for each external test package
|
||||
// it loads, it also augments the corresponding non-test package.
|
||||
//
|
||||
// The import graph over n unaugmented packages must be acyclic; the
|
||||
// import graph over n-1 unaugmented packages plus one augmented
|
||||
// package must also be acyclic. ('go test' relies on this.) But the
|
||||
// import graph over n augmented packages may contain cycles.
|
||||
//
|
||||
// First, all the (unaugmented) non-test packages and their
|
||||
// dependencies are imported in the usual way; the loader reports an
|
||||
// error if it detects an import cycle.
|
||||
//
|
||||
// Then, each package P for which testing is desired is augmented by
|
||||
// the list P' of its in-package test files, by calling
|
||||
// (*types.Checker).Files. This arrangement ensures that P' may
|
||||
// reference definitions within P, but P may not reference definitions
|
||||
// within P'. Furthermore, P' may import any other package, including
|
||||
// ones that depend upon P, without an import cycle error.
|
||||
//
|
||||
// Consider two packages A and B, both of which have lists of
|
||||
// in-package test files we'll call A' and B', and which have the
|
||||
// following import graph edges:
|
||||
// B imports A
|
||||
// B' imports A
|
||||
// A' imports B
|
||||
// This last edge would be expected to create an error were it not
|
||||
// for the special type-checking discipline above.
|
||||
// Cycles of size greater than two are possible. For example:
|
||||
// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil"
|
||||
// io/ioutil/tempfile_test.go (package ioutil) imports "regexp"
|
||||
// regexp/exec_test.go (package regexp) imports "compress/bzip2"
|
||||
//
|
||||
//
|
||||
// Concurrency
|
||||
// -----------
|
||||
//
|
||||
// Let us define the import dependency graph as follows. Each node is a
|
||||
// list of files passed to (Checker).Files at once. Many of these lists
|
||||
// are the production code of an importable Go package, so those nodes
|
||||
// are labelled by the package's path. The remaining nodes are
|
||||
// ad hoc packages and lists of in-package *_test.go files that augment
|
||||
// an importable package; those nodes have no label.
|
||||
//
|
||||
// The edges of the graph represent import statements appearing within a
|
||||
// file. An edge connects a node (a list of files) to the node it
|
||||
// imports, which is importable and thus always labelled.
|
||||
//
|
||||
// Loading is controlled by this dependency graph.
|
||||
//
|
||||
// To reduce I/O latency, we start loading a package's dependencies
|
||||
// asynchronously as soon as we've parsed its files and enumerated its
|
||||
// imports (scanImports). This performs a preorder traversal of the
|
||||
// import dependency graph.
|
||||
//
|
||||
// To exploit hardware parallelism, we type-check unrelated packages in
|
||||
// parallel, where "unrelated" means not ordered by the partial order of
|
||||
// the import dependency graph.
|
||||
//
|
||||
// We use a concurrency-safe non-blocking cache (importer.imported) to
|
||||
// record the results of type-checking, whether success or failure. An
|
||||
// entry is created in this cache by startLoad the first time the
|
||||
// package is imported. The first goroutine to request an entry becomes
|
||||
// responsible for completing the task and broadcasting completion to
|
||||
// subsequent requestors, which block until then.
|
||||
//
|
||||
// Type checking occurs in (parallel) postorder: we cannot type-check a
|
||||
// set of files until we have loaded and type-checked all of their
|
||||
// immediate dependencies (and thus all of their transitive
|
||||
// dependencies). If the input were guaranteed free of import cycles,
|
||||
// this would be trivial: we could simply wait for completion of the
|
||||
// dependencies and then invoke the typechecker.
|
||||
//
|
||||
// But as we saw in the 'go test' section above, some cycles in the
|
||||
// import graph over packages are actually legal, so long as the
|
||||
// cycle-forming edge originates in the in-package test files that
|
||||
// augment the package. This explains why the nodes of the import
|
||||
// dependency graph are not packages, but lists of files: the unlabelled
|
||||
// nodes avoid the cycles. Consider packages A and B where B imports A
|
||||
// and A's in-package tests AT import B. The naively constructed import
|
||||
// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but
|
||||
// the graph over lists of files is AT --> B --> A, where AT is an
|
||||
// unlabelled node.
|
||||
//
|
||||
// Awaiting completion of the dependencies in a cyclic graph would
|
||||
// deadlock, so we must materialize the import dependency graph (as
|
||||
// importer.graph) and check whether each import edge forms a cycle. If
|
||||
// x imports y, and the graph already contains a path from y to x, then
|
||||
// there is an import cycle, in which case the processing of x must not
|
||||
// wait for the completion of processing of y.
|
||||
//
|
||||
// When the type-checker makes a callback (doImport) to the loader for a
|
||||
// given import edge, there are two possible cases. In the normal case,
|
||||
// the dependency has already been completely type-checked; doImport
|
||||
// does a cache lookup and returns it. In the cyclic case, the entry in
|
||||
// the cache is still necessarily incomplete, indicating a cycle. We
|
||||
// perform the cycle check again to obtain the error message, and return
|
||||
// the error.
|
||||
//
|
||||
// The result of using concurrency is about a 2.5x speedup for stdlib_test.
|
||||
1086
vendor/golang.org/x/tools/go/loader/loader.go
generated
vendored
Normal file
1086
vendor/golang.org/x/tools/go/loader/loader.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
124
vendor/golang.org/x/tools/go/loader/util.go
generated
vendored
Normal file
124
vendor/golang.org/x/tools/go/loader/util.go
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package loader
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/tools/go/buildutil"
|
||||
)
|
||||
|
||||
// We use a counting semaphore to limit
|
||||
// the number of parallel I/O calls per process.
|
||||
var ioLimit = make(chan bool, 10)
|
||||
|
||||
// parseFiles parses the Go source files within directory dir and
|
||||
// returns the ASTs of the ones that could be at least partially parsed,
|
||||
// along with a list of I/O and parse errors encountered.
|
||||
//
|
||||
// I/O is done via ctxt, which may specify a virtual file system.
|
||||
// displayPath is used to transform the filenames attached to the ASTs.
|
||||
//
|
||||
func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) {
|
||||
if displayPath == nil {
|
||||
displayPath = func(path string) string { return path }
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
n := len(files)
|
||||
parsed := make([]*ast.File, n)
|
||||
errors := make([]error, n)
|
||||
for i, file := range files {
|
||||
if !buildutil.IsAbsPath(ctxt, file) {
|
||||
file = buildutil.JoinPath(ctxt, dir, file)
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(i int, file string) {
|
||||
ioLimit <- true // wait
|
||||
defer func() {
|
||||
wg.Done()
|
||||
<-ioLimit // signal
|
||||
}()
|
||||
var rd io.ReadCloser
|
||||
var err error
|
||||
if ctxt.OpenFile != nil {
|
||||
rd, err = ctxt.OpenFile(file)
|
||||
} else {
|
||||
rd, err = os.Open(file)
|
||||
}
|
||||
if err != nil {
|
||||
errors[i] = err // open failed
|
||||
return
|
||||
}
|
||||
|
||||
// ParseFile may return both an AST and an error.
|
||||
parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode)
|
||||
rd.Close()
|
||||
}(i, file)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Eliminate nils, preserving order.
|
||||
var o int
|
||||
for _, f := range parsed {
|
||||
if f != nil {
|
||||
parsed[o] = f
|
||||
o++
|
||||
}
|
||||
}
|
||||
parsed = parsed[:o]
|
||||
|
||||
o = 0
|
||||
for _, err := range errors {
|
||||
if err != nil {
|
||||
errors[o] = err
|
||||
o++
|
||||
}
|
||||
}
|
||||
errors = errors[:o]
|
||||
|
||||
return parsed, errors
|
||||
}
|
||||
|
||||
// scanImports returns the set of all import paths from all
|
||||
// import specs in the specified files.
|
||||
func scanImports(files []*ast.File) map[string]bool {
|
||||
imports := make(map[string]bool)
|
||||
for _, f := range files {
|
||||
for _, decl := range f.Decls {
|
||||
if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT {
|
||||
for _, spec := range decl.Specs {
|
||||
spec := spec.(*ast.ImportSpec)
|
||||
|
||||
// NB: do not assume the program is well-formed!
|
||||
path, err := strconv.Unquote(spec.Path.Value)
|
||||
if err != nil {
|
||||
continue // quietly ignore the error
|
||||
}
|
||||
if path == "C" {
|
||||
continue // skip pseudopackage
|
||||
}
|
||||
imports[path] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return imports
|
||||
}
|
||||
|
||||
// ---------- Internal helpers ----------
|
||||
|
||||
// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
|
||||
func tokenFileContainsPos(f *token.File, pos token.Pos) bool {
|
||||
p := int(pos)
|
||||
base := f.Base()
|
||||
return base <= p && p < base+f.Size()
|
||||
}
|
||||
3
vendor/golang.org/x/tools/go/packages/doc.go
generated
vendored
3
vendor/golang.org/x/tools/go/packages/doc.go
generated
vendored
@@ -60,8 +60,7 @@ causes Load to run in LoadFiles mode, collecting minimal information.
|
||||
See the documentation for type Config for details.
|
||||
|
||||
As noted earlier, the Config.Mode controls the amount of detail
|
||||
reported about the loaded packages, with each mode returning all the data of the
|
||||
previous mode with some extra added. See the documentation for type LoadMode
|
||||
reported about the loaded packages. See the documentation for type LoadMode
|
||||
for details.
|
||||
|
||||
Most tools should pass their command-line arguments (after any flags)
|
||||
|
||||
7
vendor/golang.org/x/tools/go/packages/external.go
generated
vendored
7
vendor/golang.org/x/tools/go/packages/external.go
generated
vendored
@@ -84,13 +84,14 @@ func findExternalDriver(cfg *Config) driver {
|
||||
cmd.Stdin = bytes.NewReader(req)
|
||||
cmd.Stdout = buf
|
||||
cmd.Stderr = stderr
|
||||
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr)
|
||||
}
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
|
||||
}
|
||||
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr)
|
||||
}
|
||||
|
||||
var response driverResponse
|
||||
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
|
||||
return nil, err
|
||||
|
||||
169
vendor/golang.org/x/tools/go/packages/golist.go
generated
vendored
169
vendor/golang.org/x/tools/go/packages/golist.go
generated
vendored
@@ -26,7 +26,6 @@ import (
|
||||
"golang.org/x/tools/go/internal/packagesdriver"
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
"golang.org/x/tools/internal/semver"
|
||||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
// debug controls verbose logging.
|
||||
@@ -254,12 +253,7 @@ func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDedu
|
||||
if len(pkgs) == 0 {
|
||||
return nil
|
||||
}
|
||||
drivercfg := *cfg
|
||||
if getGoInfo().env.modulesOn {
|
||||
drivercfg.BuildFlags = append(drivercfg.BuildFlags, "-mod=readonly")
|
||||
}
|
||||
dr, err := driver(&drivercfg, pkgs...)
|
||||
|
||||
dr, err := driver(cfg, pkgs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -270,10 +264,7 @@ func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDedu
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo)
|
||||
}
|
||||
|
||||
func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, goInfo func() *goInfo) error {
|
||||
@@ -287,42 +278,43 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q
|
||||
return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
|
||||
}
|
||||
dirResponse, err := driver(cfg, pattern)
|
||||
if err != nil {
|
||||
if err != nil || (len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1) {
|
||||
// There was an error loading the package. Try to load the file as an ad-hoc package.
|
||||
// Usually the error will appear in a returned package, but may not if we're in modules mode
|
||||
// and the ad-hoc is located outside a module.
|
||||
var queryErr error
|
||||
if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil {
|
||||
return err // return the original error
|
||||
dirResponse, queryErr = driver(cfg, query)
|
||||
if queryErr != nil {
|
||||
// Return the original error if the attempt to fall back failed.
|
||||
return err
|
||||
}
|
||||
}
|
||||
// `go list` can report errors for files that are not listed as part of a package's GoFiles.
|
||||
// In the case of an invalid Go file, we should assume that it is part of package if only
|
||||
// one package is in the response. The file may have valid contents in an overlay.
|
||||
if len(dirResponse.Packages) == 1 {
|
||||
pkg := dirResponse.Packages[0]
|
||||
for i, err := range pkg.Errors {
|
||||
s := errorSpan(err)
|
||||
if !s.IsValid() {
|
||||
break
|
||||
}
|
||||
if len(pkg.CompiledGoFiles) == 0 {
|
||||
break
|
||||
}
|
||||
dir := filepath.Dir(pkg.CompiledGoFiles[0])
|
||||
filename := filepath.Join(dir, filepath.Base(s.URI().Filename()))
|
||||
if info, err := os.Stat(filename); err != nil || info.IsDir() {
|
||||
break
|
||||
}
|
||||
if !contains(pkg.CompiledGoFiles, filename) {
|
||||
pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
|
||||
pkg.GoFiles = append(pkg.GoFiles, filename)
|
||||
pkg.Errors = append(pkg.Errors[:i], pkg.Errors[i+1:]...)
|
||||
}
|
||||
// If we get nothing back from `go list`, try to make this file into its own ad-hoc package.
|
||||
if len(dirResponse.Packages) == 0 && queryErr == nil {
|
||||
dirResponse.Packages = append(dirResponse.Packages, &Package{
|
||||
ID: "command-line-arguments",
|
||||
PkgPath: query,
|
||||
GoFiles: []string{query},
|
||||
CompiledGoFiles: []string{query},
|
||||
Imports: make(map[string]*Package),
|
||||
})
|
||||
dirResponse.Roots = append(dirResponse.Roots, "command-line-arguments")
|
||||
}
|
||||
}
|
||||
// A final attempt to construct an ad-hoc package.
|
||||
if len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1 {
|
||||
var queryErr error
|
||||
if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil {
|
||||
return err // return the original error
|
||||
// Special case to handle issue #33482:
|
||||
// If this is a file= query for ad-hoc packages where the file only exists on an overlay,
|
||||
// and exists outside of a module, add the file in for the package.
|
||||
if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" ||
|
||||
filepath.ToSlash(dirResponse.Packages[0].PkgPath) == filepath.ToSlash(query)) {
|
||||
if len(dirResponse.Packages[0].GoFiles) == 0 {
|
||||
filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
|
||||
// TODO(matloob): check if the file is outside of a root dir?
|
||||
for path := range cfg.Overlay {
|
||||
if path == filename {
|
||||
dirResponse.Packages[0].Errors = nil
|
||||
dirResponse.Packages[0].GoFiles = []string{path}
|
||||
dirResponse.Packages[0].CompiledGoFiles = []string{path}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
isRoot := make(map[string]bool, len(dirResponse.Roots))
|
||||
@@ -350,74 +342,6 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q
|
||||
return nil
|
||||
}
|
||||
|
||||
// adHocPackage attempts to construct an ad-hoc package given a query that failed.
|
||||
func adHocPackage(cfg *Config, driver driver, pattern, query string) (*driverResponse, error) {
|
||||
// There was an error loading the package. Try to load the file as an ad-hoc package.
|
||||
// Usually the error will appear in a returned package, but may not if we're in modules mode
|
||||
// and the ad-hoc is located outside a module.
|
||||
dirResponse, err := driver(cfg, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If we get nothing back from `go list`, try to make this file into its own ad-hoc package.
|
||||
if len(dirResponse.Packages) == 0 && err == nil {
|
||||
dirResponse.Packages = append(dirResponse.Packages, &Package{
|
||||
ID: "command-line-arguments",
|
||||
PkgPath: query,
|
||||
GoFiles: []string{query},
|
||||
CompiledGoFiles: []string{query},
|
||||
Imports: make(map[string]*Package),
|
||||
})
|
||||
dirResponse.Roots = append(dirResponse.Roots, "command-line-arguments")
|
||||
}
|
||||
// Special case to handle issue #33482:
|
||||
// If this is a file= query for ad-hoc packages where the file only exists on an overlay,
|
||||
// and exists outside of a module, add the file in for the package.
|
||||
if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" || dirResponse.Packages[0].PkgPath == filepath.ToSlash(query)) {
|
||||
if len(dirResponse.Packages[0].GoFiles) == 0 {
|
||||
filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
|
||||
// TODO(matloob): check if the file is outside of a root dir?
|
||||
for path := range cfg.Overlay {
|
||||
if path == filename {
|
||||
dirResponse.Packages[0].Errors = nil
|
||||
dirResponse.Packages[0].GoFiles = []string{path}
|
||||
dirResponse.Packages[0].CompiledGoFiles = []string{path}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return dirResponse, nil
|
||||
}
|
||||
|
||||
func contains(files []string, filename string) bool {
|
||||
for _, f := range files {
|
||||
if f == filename {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// errorSpan attempts to parse a standard `go list` error message
|
||||
// by stripping off the trailing error message.
|
||||
//
|
||||
// It works only on errors whose message is prefixed by colon,
|
||||
// followed by a space (": "). For example:
|
||||
//
|
||||
// attributes.go:13:1: expected 'package', found 'type'
|
||||
//
|
||||
func errorSpan(err Error) span.Span {
|
||||
if err.Pos == "" {
|
||||
input := strings.TrimSpace(err.Msg)
|
||||
msgIndex := strings.Index(input, ": ")
|
||||
if msgIndex < 0 {
|
||||
return span.Parse(input)
|
||||
}
|
||||
return span.Parse(input[:msgIndex])
|
||||
}
|
||||
return span.Parse(err.Pos)
|
||||
}
|
||||
|
||||
// modCacheRegexp splits a path in a module cache into module, module version, and package.
|
||||
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
|
||||
|
||||
@@ -749,7 +673,7 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv
|
||||
|
||||
// Run "go list" for complete
|
||||
// information on the specified packages.
|
||||
buf, err := invokeGo(cfg, golistargs(cfg, words)...)
|
||||
buf, err := invokeGo(cfg, "list", golistargs(cfg, words)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -881,9 +805,15 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv
|
||||
}
|
||||
|
||||
if p.Error != nil {
|
||||
msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
|
||||
// Address golang.org/issue/35964 by appending import stack to error message.
|
||||
if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 {
|
||||
msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack)
|
||||
}
|
||||
pkg.Errors = append(pkg.Errors, Error{
|
||||
Pos: p.Error.Pos,
|
||||
Msg: strings.TrimSpace(p.Error.Err), // Trim to work around golang.org/issue/32363.
|
||||
Pos: p.Error.Pos,
|
||||
Msg: msg,
|
||||
Kind: ListError,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -947,7 +877,7 @@ func absJoin(dir string, fileses ...[]string) (res []string) {
|
||||
func golistargs(cfg *Config, words []string) []string {
|
||||
const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
|
||||
fullargs := []string{
|
||||
"list", "-e", "-json",
|
||||
"-e", "-json",
|
||||
fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0),
|
||||
fmt.Sprintf("-test=%t", cfg.Tests),
|
||||
fmt.Sprintf("-export=%t", usesExportData(cfg)),
|
||||
@@ -963,10 +893,13 @@ func golistargs(cfg *Config, words []string) []string {
|
||||
}
|
||||
|
||||
// invokeGo returns the stdout of a go command invocation.
|
||||
func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
|
||||
func invokeGo(cfg *Config, verb string, args ...string) (*bytes.Buffer, error) {
|
||||
stdout := new(bytes.Buffer)
|
||||
stderr := new(bytes.Buffer)
|
||||
cmd := exec.CommandContext(cfg.Context, "go", args...)
|
||||
goArgs := []string{verb}
|
||||
goArgs = append(goArgs, cfg.BuildFlags...)
|
||||
goArgs = append(goArgs, args...)
|
||||
cmd := exec.CommandContext(cfg.Context, "go", goArgs...)
|
||||
// On darwin the cwd gets resolved to the real path, which breaks anything that
|
||||
// expects the working directory to keep the original path, including the
|
||||
// go command when dealing with modules.
|
||||
|
||||
57
vendor/golang.org/x/tools/go/packages/loadmode_string.go
generated
vendored
Normal file
57
vendor/golang.org/x/tools/go/packages/loadmode_string.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package packages
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var allModes = []LoadMode{
|
||||
NeedName,
|
||||
NeedFiles,
|
||||
NeedCompiledGoFiles,
|
||||
NeedImports,
|
||||
NeedDeps,
|
||||
NeedExportsFile,
|
||||
NeedTypes,
|
||||
NeedSyntax,
|
||||
NeedTypesInfo,
|
||||
NeedTypesSizes,
|
||||
}
|
||||
|
||||
var modeStrings = []string{
|
||||
"NeedName",
|
||||
"NeedFiles",
|
||||
"NeedCompiledGoFiles",
|
||||
"NeedImports",
|
||||
"NeedDeps",
|
||||
"NeedExportsFile",
|
||||
"NeedTypes",
|
||||
"NeedSyntax",
|
||||
"NeedTypesInfo",
|
||||
"NeedTypesSizes",
|
||||
}
|
||||
|
||||
func (mod LoadMode) String() string {
|
||||
m := mod
|
||||
if m == 0 {
|
||||
return fmt.Sprintf("LoadMode(0)")
|
||||
}
|
||||
var out []string
|
||||
for i, x := range allModes {
|
||||
if x > m {
|
||||
break
|
||||
}
|
||||
if (m & x) != 0 {
|
||||
out = append(out, modeStrings[i])
|
||||
m = m ^ x
|
||||
}
|
||||
}
|
||||
if m != 0 {
|
||||
out = append(out, "Unknown")
|
||||
}
|
||||
return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|"))
|
||||
}
|
||||
15
vendor/golang.org/x/tools/go/packages/packages.go
generated
vendored
15
vendor/golang.org/x/tools/go/packages/packages.go
generated
vendored
@@ -160,7 +160,7 @@ type Config struct {
|
||||
Tests bool
|
||||
|
||||
// Overlay provides a mapping of absolute file paths to file contents.
|
||||
// If the file with the given path already exists, the parser will use the
|
||||
// If the file with the given path already exists, the parser will use the
|
||||
// alternative file contents provided by the map.
|
||||
//
|
||||
// Overlays provide incomplete support for when a given file doesn't
|
||||
@@ -467,7 +467,7 @@ func newLoader(cfg *Config) *loader {
|
||||
ld.requestedMode = ld.Mode
|
||||
ld.Mode = impliedLoadMode(ld.Mode)
|
||||
|
||||
if ld.Mode&NeedTypes != 0 {
|
||||
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
|
||||
if ld.Fset == nil {
|
||||
ld.Fset = token.NewFileSet()
|
||||
}
|
||||
@@ -609,9 +609,9 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
// Load type data if needed, starting at
|
||||
// Load type data and syntax if needed, starting at
|
||||
// the initial packages (roots of the import DAG).
|
||||
if ld.Mode&NeedTypes != 0 {
|
||||
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
|
||||
var wg sync.WaitGroup
|
||||
for _, lpkg := range initial {
|
||||
wg.Add(1)
|
||||
@@ -713,7 +713,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
// which would then require that such created packages be explicitly
|
||||
// inserted back into the Import graph as a final step after export data loading.
|
||||
// The Diamond test exercises this case.
|
||||
if !lpkg.needtypes {
|
||||
if !lpkg.needtypes && !lpkg.needsrc {
|
||||
return
|
||||
}
|
||||
if !lpkg.needsrc {
|
||||
@@ -770,7 +770,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
lpkg.Errors = append(lpkg.Errors, errs...)
|
||||
}
|
||||
|
||||
if len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" {
|
||||
if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" {
|
||||
// The config requested loading sources and types, but sources are missing.
|
||||
// Add an error to the package and fall back to loading from export data.
|
||||
appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError})
|
||||
@@ -784,6 +784,9 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
}
|
||||
|
||||
lpkg.Syntax = files
|
||||
if ld.Config.Mode&NeedTypes == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
lpkg.TypesInfo = &types.Info{
|
||||
Types: make(map[ast.Expr]types.TypeAndValue),
|
||||
|
||||
33
vendor/golang.org/x/tools/go/pointer/TODO
generated
vendored
Normal file
33
vendor/golang.org/x/tools/go/pointer/TODO
generated
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
-*- text -*-
|
||||
|
||||
Pointer analysis to-do list
|
||||
===========================
|
||||
|
||||
CONSTRAINT GENERATION:
|
||||
- support reflection:
|
||||
- a couple of operators are missing
|
||||
- reflect.Values may contain lvalues (CanAddr)
|
||||
- implement native intrinsics. These vary by platform.
|
||||
- add to pts(a.panic) a label representing all runtime panics, e.g.
|
||||
runtime.{TypeAssertionError,errorString,errorCString}.
|
||||
|
||||
OPTIMISATIONS
|
||||
- pre-solver:
|
||||
pointer equivalence: extend HVN to HRU
|
||||
location equivalence
|
||||
- solver: HCD, LCD.
|
||||
- experiment with map+slice worklist in lieu of bitset.
|
||||
It may have faster insert.
|
||||
|
||||
MISC:
|
||||
- Test on all platforms.
|
||||
Currently we assume these go/build tags: linux, amd64, !cgo.
|
||||
|
||||
MAINTAINABILITY
|
||||
- Think about ways to make debugging this code easier. PTA logs
|
||||
routinely exceed a million lines and require training to read.
|
||||
|
||||
BUGS:
|
||||
- There's a crash bug in stdlib_test + reflection, rVCallConstraint.
|
||||
|
||||
|
||||
452
vendor/golang.org/x/tools/go/pointer/analysis.go
generated
vendored
Normal file
452
vendor/golang.org/x/tools/go/pointer/analysis.go
generated
vendored
Normal file
@@ -0,0 +1,452 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pointer
|
||||
|
||||
// This file defines the main datatypes and Analyze function of the pointer analysis.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"sort"
|
||||
|
||||
"golang.org/x/tools/go/callgraph"
|
||||
"golang.org/x/tools/go/ssa"
|
||||
"golang.org/x/tools/go/types/typeutil"
|
||||
)
|
||||
|
||||
const (
|
||||
// optimization options; enable all when committing
|
||||
optRenumber = true // enable renumbering optimization (makes logs hard to read)
|
||||
optHVN = true // enable pointer equivalence via Hash-Value Numbering
|
||||
|
||||
// debugging options; disable all when committing
|
||||
debugHVN = false // enable assertions in HVN
|
||||
debugHVNVerbose = false // enable extra HVN logging
|
||||
debugHVNCrossCheck = false // run solver with/without HVN and compare (caveats below)
|
||||
debugTimers = false // show running time of each phase
|
||||
)
|
||||
|
||||
// object.flags bitmask values.
|
||||
const (
|
||||
otTagged = 1 << iota // type-tagged object
|
||||
otIndirect // type-tagged object with indirect payload
|
||||
otFunction // function object
|
||||
)
|
||||
|
||||
// An object represents a contiguous block of memory to which some
|
||||
// (generalized) pointer may point.
|
||||
//
|
||||
// (Note: most variables called 'obj' are not *objects but nodeids
|
||||
// such that a.nodes[obj].obj != nil.)
|
||||
//
|
||||
type object struct {
|
||||
// flags is a bitset of the node type (ot*) flags defined above.
|
||||
flags uint32
|
||||
|
||||
// Number of following nodes belonging to the same "object"
|
||||
// allocation. Zero for all other nodes.
|
||||
size uint32
|
||||
|
||||
// data describes this object; it has one of these types:
|
||||
//
|
||||
// ssa.Value for an object allocated by an SSA operation.
|
||||
// types.Type for an rtype instance object or *rtype-tagged object.
|
||||
// string for an instrinsic object, e.g. the array behind os.Args.
|
||||
// nil for an object allocated by an instrinsic.
|
||||
// (cgn provides the identity of the intrinsic.)
|
||||
data interface{}
|
||||
|
||||
// The call-graph node (=context) in which this object was allocated.
|
||||
// May be nil for global objects: Global, Const, some Functions.
|
||||
cgn *cgnode
|
||||
}
|
||||
|
||||
// nodeid denotes a node.
|
||||
// It is an index within analysis.nodes.
|
||||
// We use small integers, not *node pointers, for many reasons:
|
||||
// - they are smaller on 64-bit systems.
|
||||
// - sets of them can be represented compactly in bitvectors or BDDs.
|
||||
// - order matters; a field offset can be computed by simple addition.
|
||||
type nodeid uint32
|
||||
|
||||
// A node is an equivalence class of memory locations.
|
||||
// Nodes may be pointers, pointed-to locations, neither, or both.
|
||||
//
|
||||
// Nodes that are pointed-to locations ("labels") have an enclosing
|
||||
// object (see analysis.enclosingObject).
|
||||
//
|
||||
type node struct {
|
||||
// If non-nil, this node is the start of an object
|
||||
// (addressable memory location).
|
||||
// The following obj.size nodes implicitly belong to the object;
|
||||
// they locate their object by scanning back.
|
||||
obj *object
|
||||
|
||||
// The type of the field denoted by this node. Non-aggregate,
|
||||
// unless this is an tagged.T node (i.e. the thing
|
||||
// pointed to by an interface) in which case typ is that type.
|
||||
typ types.Type
|
||||
|
||||
// subelement indicates which directly embedded subelement of
|
||||
// an object of aggregate type (struct, tuple, array) this is.
|
||||
subelement *fieldInfo // e.g. ".a.b[*].c"
|
||||
|
||||
// Solver state for the canonical node of this pointer-
|
||||
// equivalence class. Each node is created with its own state
|
||||
// but they become shared after HVN.
|
||||
solve *solverState
|
||||
}
|
||||
|
||||
// An analysis instance holds the state of a single pointer analysis problem.
|
||||
type analysis struct {
|
||||
config *Config // the client's control/observer interface
|
||||
prog *ssa.Program // the program being analyzed
|
||||
log io.Writer // log stream; nil to disable
|
||||
panicNode nodeid // sink for panic, source for recover
|
||||
nodes []*node // indexed by nodeid
|
||||
flattenMemo map[types.Type][]*fieldInfo // memoization of flatten()
|
||||
trackTypes map[types.Type]bool // memoization of shouldTrack()
|
||||
constraints []constraint // set of constraints
|
||||
cgnodes []*cgnode // all cgnodes
|
||||
genq []*cgnode // queue of functions to generate constraints for
|
||||
intrinsics map[*ssa.Function]intrinsic // non-nil values are summaries for intrinsic fns
|
||||
globalval map[ssa.Value]nodeid // node for each global ssa.Value
|
||||
globalobj map[ssa.Value]nodeid // maps v to sole member of pts(v), if singleton
|
||||
localval map[ssa.Value]nodeid // node for each local ssa.Value
|
||||
localobj map[ssa.Value]nodeid // maps v to sole member of pts(v), if singleton
|
||||
atFuncs map[*ssa.Function]bool // address-taken functions (for presolver)
|
||||
mapValues []nodeid // values of makemap objects (indirect in HVN)
|
||||
work nodeset // solver's worklist
|
||||
result *Result // results of the analysis
|
||||
track track // pointerlike types whose aliasing we track
|
||||
deltaSpace []int // working space for iterating over PTS deltas
|
||||
|
||||
// Reflection & intrinsics:
|
||||
hasher typeutil.Hasher // cache of type hashes
|
||||
reflectValueObj types.Object // type symbol for reflect.Value (if present)
|
||||
reflectValueCall *ssa.Function // (reflect.Value).Call
|
||||
reflectRtypeObj types.Object // *types.TypeName for reflect.rtype (if present)
|
||||
reflectRtypePtr *types.Pointer // *reflect.rtype
|
||||
reflectType *types.Named // reflect.Type
|
||||
rtypes typeutil.Map // nodeid of canonical *rtype-tagged object for type T
|
||||
reflectZeros typeutil.Map // nodeid of canonical T-tagged object for zero value
|
||||
runtimeSetFinalizer *ssa.Function // runtime.SetFinalizer
|
||||
}
|
||||
|
||||
// enclosingObj returns the first node of the addressable memory
|
||||
// object that encloses node id. Panic ensues if that node does not
|
||||
// belong to any object.
|
||||
func (a *analysis) enclosingObj(id nodeid) nodeid {
|
||||
// Find previous node with obj != nil.
|
||||
for i := id; i >= 0; i-- {
|
||||
n := a.nodes[i]
|
||||
if obj := n.obj; obj != nil {
|
||||
if i+nodeid(obj.size) <= id {
|
||||
break // out of bounds
|
||||
}
|
||||
return i
|
||||
}
|
||||
}
|
||||
panic("node has no enclosing object")
|
||||
}
|
||||
|
||||
// labelFor returns the Label for node id.
|
||||
// Panic ensues if that node is not addressable.
|
||||
func (a *analysis) labelFor(id nodeid) *Label {
|
||||
return &Label{
|
||||
obj: a.nodes[a.enclosingObj(id)].obj,
|
||||
subelement: a.nodes[id].subelement,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *analysis) warnf(pos token.Pos, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
if a.log != nil {
|
||||
fmt.Fprintf(a.log, "%s: warning: %s\n", a.prog.Fset.Position(pos), msg)
|
||||
}
|
||||
a.result.Warnings = append(a.result.Warnings, Warning{pos, msg})
|
||||
}
|
||||
|
||||
// computeTrackBits sets a.track to the necessary 'track' bits for the pointer queries.
|
||||
func (a *analysis) computeTrackBits() {
|
||||
if len(a.config.extendedQueries) != 0 {
|
||||
// TODO(dh): only track the types necessary for the query.
|
||||
a.track = trackAll
|
||||
return
|
||||
}
|
||||
var queryTypes []types.Type
|
||||
for v := range a.config.Queries {
|
||||
queryTypes = append(queryTypes, v.Type())
|
||||
}
|
||||
for v := range a.config.IndirectQueries {
|
||||
queryTypes = append(queryTypes, mustDeref(v.Type()))
|
||||
}
|
||||
for _, t := range queryTypes {
|
||||
switch t.Underlying().(type) {
|
||||
case *types.Chan:
|
||||
a.track |= trackChan
|
||||
case *types.Map:
|
||||
a.track |= trackMap
|
||||
case *types.Pointer:
|
||||
a.track |= trackPtr
|
||||
case *types.Slice:
|
||||
a.track |= trackSlice
|
||||
case *types.Interface:
|
||||
a.track = trackAll
|
||||
return
|
||||
}
|
||||
if rVObj := a.reflectValueObj; rVObj != nil && types.Identical(t, rVObj.Type()) {
|
||||
a.track = trackAll
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Analyze runs the pointer analysis with the scope and options
|
||||
// specified by config, and returns the (synthetic) root of the callgraph.
|
||||
//
|
||||
// Pointer analysis of a transitively closed well-typed program should
|
||||
// always succeed. An error can occur only due to an internal bug.
|
||||
//
|
||||
func Analyze(config *Config) (result *Result, err error) {
|
||||
if config.Mains == nil {
|
||||
return nil, fmt.Errorf("no main/test packages to analyze (check $GOROOT/$GOPATH)")
|
||||
}
|
||||
defer func() {
|
||||
if p := recover(); p != nil {
|
||||
err = fmt.Errorf("internal error in pointer analysis: %v (please report this bug)", p)
|
||||
fmt.Fprintln(os.Stderr, "Internal panic in pointer analysis:")
|
||||
debug.PrintStack()
|
||||
}
|
||||
}()
|
||||
|
||||
a := &analysis{
|
||||
config: config,
|
||||
log: config.Log,
|
||||
prog: config.prog(),
|
||||
globalval: make(map[ssa.Value]nodeid),
|
||||
globalobj: make(map[ssa.Value]nodeid),
|
||||
flattenMemo: make(map[types.Type][]*fieldInfo),
|
||||
trackTypes: make(map[types.Type]bool),
|
||||
atFuncs: make(map[*ssa.Function]bool),
|
||||
hasher: typeutil.MakeHasher(),
|
||||
intrinsics: make(map[*ssa.Function]intrinsic),
|
||||
result: &Result{
|
||||
Queries: make(map[ssa.Value]Pointer),
|
||||
IndirectQueries: make(map[ssa.Value]Pointer),
|
||||
},
|
||||
deltaSpace: make([]int, 0, 100),
|
||||
}
|
||||
|
||||
if false {
|
||||
a.log = os.Stderr // for debugging crashes; extremely verbose
|
||||
}
|
||||
|
||||
if a.log != nil {
|
||||
fmt.Fprintln(a.log, "==== Starting analysis")
|
||||
}
|
||||
|
||||
// Pointer analysis requires a complete program for soundness.
|
||||
// Check to prevent accidental misconfiguration.
|
||||
for _, pkg := range a.prog.AllPackages() {
|
||||
// (This only checks that the package scope is complete,
|
||||
// not that func bodies exist, but it's a good signal.)
|
||||
if !pkg.Pkg.Complete() {
|
||||
return nil, fmt.Errorf(`pointer analysis requires a complete program yet package %q was incomplete`, pkg.Pkg.Path())
|
||||
}
|
||||
}
|
||||
|
||||
if reflect := a.prog.ImportedPackage("reflect"); reflect != nil {
|
||||
rV := reflect.Pkg.Scope().Lookup("Value")
|
||||
a.reflectValueObj = rV
|
||||
a.reflectValueCall = a.prog.LookupMethod(rV.Type(), nil, "Call")
|
||||
a.reflectType = reflect.Pkg.Scope().Lookup("Type").Type().(*types.Named)
|
||||
a.reflectRtypeObj = reflect.Pkg.Scope().Lookup("rtype")
|
||||
a.reflectRtypePtr = types.NewPointer(a.reflectRtypeObj.Type())
|
||||
|
||||
// Override flattening of reflect.Value, treating it like a basic type.
|
||||
tReflectValue := a.reflectValueObj.Type()
|
||||
a.flattenMemo[tReflectValue] = []*fieldInfo{{typ: tReflectValue}}
|
||||
|
||||
// Override shouldTrack of reflect.Value and *reflect.rtype.
|
||||
// Always track pointers of these types.
|
||||
a.trackTypes[tReflectValue] = true
|
||||
a.trackTypes[a.reflectRtypePtr] = true
|
||||
|
||||
a.rtypes.SetHasher(a.hasher)
|
||||
a.reflectZeros.SetHasher(a.hasher)
|
||||
}
|
||||
if runtime := a.prog.ImportedPackage("runtime"); runtime != nil {
|
||||
a.runtimeSetFinalizer = runtime.Func("SetFinalizer")
|
||||
}
|
||||
a.computeTrackBits()
|
||||
|
||||
a.generate()
|
||||
a.showCounts()
|
||||
|
||||
if optRenumber {
|
||||
a.renumber()
|
||||
}
|
||||
|
||||
N := len(a.nodes) // excludes solver-created nodes
|
||||
|
||||
if optHVN {
|
||||
if debugHVNCrossCheck {
|
||||
// Cross-check: run the solver once without
|
||||
// optimization, once with, and compare the
|
||||
// solutions.
|
||||
savedConstraints := a.constraints
|
||||
|
||||
a.solve()
|
||||
a.dumpSolution("A.pts", N)
|
||||
|
||||
// Restore.
|
||||
a.constraints = savedConstraints
|
||||
for _, n := range a.nodes {
|
||||
n.solve = new(solverState)
|
||||
}
|
||||
a.nodes = a.nodes[:N]
|
||||
|
||||
// rtypes is effectively part of the solver state.
|
||||
a.rtypes = typeutil.Map{}
|
||||
a.rtypes.SetHasher(a.hasher)
|
||||
}
|
||||
|
||||
a.hvn()
|
||||
}
|
||||
|
||||
if debugHVNCrossCheck {
|
||||
runtime.GC()
|
||||
runtime.GC()
|
||||
}
|
||||
|
||||
a.solve()
|
||||
|
||||
// Compare solutions.
|
||||
if optHVN && debugHVNCrossCheck {
|
||||
a.dumpSolution("B.pts", N)
|
||||
|
||||
if !diff("A.pts", "B.pts") {
|
||||
return nil, fmt.Errorf("internal error: optimization changed solution")
|
||||
}
|
||||
}
|
||||
|
||||
// Create callgraph.Nodes in deterministic order.
|
||||
if cg := a.result.CallGraph; cg != nil {
|
||||
for _, caller := range a.cgnodes {
|
||||
cg.CreateNode(caller.fn)
|
||||
}
|
||||
}
|
||||
|
||||
// Add dynamic edges to call graph.
|
||||
var space [100]int
|
||||
for _, caller := range a.cgnodes {
|
||||
for _, site := range caller.sites {
|
||||
for _, callee := range a.nodes[site.targets].solve.pts.AppendTo(space[:0]) {
|
||||
a.callEdge(caller, site, nodeid(callee))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return a.result, nil
|
||||
}
|
||||
|
||||
// callEdge is called for each edge in the callgraph.
|
||||
// calleeid is the callee's object node (has otFunction flag).
|
||||
//
|
||||
func (a *analysis) callEdge(caller *cgnode, site *callsite, calleeid nodeid) {
|
||||
obj := a.nodes[calleeid].obj
|
||||
if obj.flags&otFunction == 0 {
|
||||
panic(fmt.Sprintf("callEdge %s -> n%d: not a function object", site, calleeid))
|
||||
}
|
||||
callee := obj.cgn
|
||||
|
||||
if cg := a.result.CallGraph; cg != nil {
|
||||
// TODO(adonovan): opt: I would expect duplicate edges
|
||||
// (to wrappers) to arise due to the elimination of
|
||||
// context information, but I haven't observed any.
|
||||
// Understand this better.
|
||||
callgraph.AddEdge(cg.CreateNode(caller.fn), site.instr, cg.CreateNode(callee.fn))
|
||||
}
|
||||
|
||||
if a.log != nil {
|
||||
fmt.Fprintf(a.log, "\tcall edge %s -> %s\n", site, callee)
|
||||
}
|
||||
|
||||
// Warn about calls to non-intrinsic external functions.
|
||||
// TODO(adonovan): de-dup these messages.
|
||||
if fn := callee.fn; fn.Blocks == nil && a.findIntrinsic(fn) == nil {
|
||||
a.warnf(site.pos(), "unsound call to unknown intrinsic: %s", fn)
|
||||
a.warnf(fn.Pos(), " (declared here)")
|
||||
}
|
||||
}
|
||||
|
||||
// dumpSolution writes the PTS solution to the specified file.
|
||||
//
|
||||
// It only dumps the nodes that existed before solving. The order in
|
||||
// which solver-created nodes are created depends on pre-solver
|
||||
// optimization, so we can't include them in the cross-check.
|
||||
//
|
||||
func (a *analysis) dumpSolution(filename string, N int) {
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for id, n := range a.nodes[:N] {
|
||||
if _, err := fmt.Fprintf(f, "pts(n%d) = {", id); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
var sep string
|
||||
for _, l := range n.solve.pts.AppendTo(a.deltaSpace) {
|
||||
if l >= N {
|
||||
break
|
||||
}
|
||||
fmt.Fprintf(f, "%s%d", sep, l)
|
||||
sep = " "
|
||||
}
|
||||
fmt.Fprintf(f, "} : %s\n", n.typ)
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// showCounts logs the size of the constraint system. A typical
|
||||
// optimized distribution is 65% copy, 13% load, 11% addr, 5%
|
||||
// offsetAddr, 4% store, 2% others.
|
||||
//
|
||||
func (a *analysis) showCounts() {
|
||||
if a.log != nil {
|
||||
counts := make(map[reflect.Type]int)
|
||||
for _, c := range a.constraints {
|
||||
counts[reflect.TypeOf(c)]++
|
||||
}
|
||||
fmt.Fprintf(a.log, "# constraints:\t%d\n", len(a.constraints))
|
||||
var lines []string
|
||||
for t, n := range counts {
|
||||
line := fmt.Sprintf("%7d (%2d%%)\t%s", n, 100*n/len(a.constraints), t)
|
||||
lines = append(lines, line)
|
||||
}
|
||||
sort.Sort(sort.Reverse(sort.StringSlice(lines)))
|
||||
for _, line := range lines {
|
||||
fmt.Fprintf(a.log, "\t%s\n", line)
|
||||
}
|
||||
|
||||
fmt.Fprintf(a.log, "# nodes:\t%d\n", len(a.nodes))
|
||||
|
||||
// Show number of pointer equivalence classes.
|
||||
m := make(map[*solverState]bool)
|
||||
for _, n := range a.nodes {
|
||||
m[n.solve] = true
|
||||
}
|
||||
fmt.Fprintf(a.log, "# ptsets:\t%d\n", len(m))
|
||||
}
|
||||
}
|
||||
285
vendor/golang.org/x/tools/go/pointer/api.go
generated
vendored
Normal file
285
vendor/golang.org/x/tools/go/pointer/api.go
generated
vendored
Normal file
@@ -0,0 +1,285 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pointer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/token"
|
||||
"io"
|
||||
|
||||
"golang.org/x/tools/container/intsets"
|
||||
"golang.org/x/tools/go/callgraph"
|
||||
"golang.org/x/tools/go/ssa"
|
||||
"golang.org/x/tools/go/types/typeutil"
|
||||
)
|
||||
|
||||
// A Config formulates a pointer analysis problem for Analyze. It is
|
||||
// only usable for a single invocation of Analyze and must not be
|
||||
// reused.
|
||||
type Config struct {
|
||||
// Mains contains the set of 'main' packages to analyze
|
||||
// Clients must provide the analysis with at least one
|
||||
// package defining a main() function.
|
||||
//
|
||||
// Non-main packages in the ssa.Program that are not
|
||||
// dependencies of any main package may still affect the
|
||||
// analysis result, because they contribute runtime types and
|
||||
// thus methods.
|
||||
// TODO(adonovan): investigate whether this is desirable.
|
||||
Mains []*ssa.Package
|
||||
|
||||
// Reflection determines whether to handle reflection
|
||||
// operators soundly, which is currently rather slow since it
|
||||
// causes constraint to be generated during solving
|
||||
// proportional to the number of constraint variables, which
|
||||
// has not yet been reduced by presolver optimisation.
|
||||
Reflection bool
|
||||
|
||||
// BuildCallGraph determines whether to construct a callgraph.
|
||||
// If enabled, the graph will be available in Result.CallGraph.
|
||||
BuildCallGraph bool
|
||||
|
||||
// The client populates Queries[v] or IndirectQueries[v]
|
||||
// for each ssa.Value v of interest, to request that the
|
||||
// points-to sets pts(v) or pts(*v) be computed. If the
|
||||
// client needs both points-to sets, v may appear in both
|
||||
// maps.
|
||||
//
|
||||
// (IndirectQueries is typically used for Values corresponding
|
||||
// to source-level lvalues, e.g. an *ssa.Global.)
|
||||
//
|
||||
// The analysis populates the corresponding
|
||||
// Result.{Indirect,}Queries map when it creates the pointer
|
||||
// variable for v or *v. Upon completion the client can
|
||||
// inspect that map for the results.
|
||||
//
|
||||
// TODO(adonovan): this API doesn't scale well for batch tools
|
||||
// that want to dump the entire solution. Perhaps optionally
|
||||
// populate a map[*ssa.DebugRef]Pointer in the Result, one
|
||||
// entry per source expression.
|
||||
//
|
||||
Queries map[ssa.Value]struct{}
|
||||
IndirectQueries map[ssa.Value]struct{}
|
||||
extendedQueries map[ssa.Value][]*extendedQuery
|
||||
|
||||
// If Log is non-nil, log messages are written to it.
|
||||
// Logging is extremely verbose.
|
||||
Log io.Writer
|
||||
}
|
||||
|
||||
type track uint32
|
||||
|
||||
const (
|
||||
trackChan track = 1 << iota // track 'chan' references
|
||||
trackMap // track 'map' references
|
||||
trackPtr // track regular pointers
|
||||
trackSlice // track slice references
|
||||
|
||||
trackAll = ^track(0)
|
||||
)
|
||||
|
||||
// AddQuery adds v to Config.Queries.
|
||||
// Precondition: CanPoint(v.Type()).
|
||||
func (c *Config) AddQuery(v ssa.Value) {
|
||||
if !CanPoint(v.Type()) {
|
||||
panic(fmt.Sprintf("%s is not a pointer-like value: %s", v, v.Type()))
|
||||
}
|
||||
if c.Queries == nil {
|
||||
c.Queries = make(map[ssa.Value]struct{})
|
||||
}
|
||||
c.Queries[v] = struct{}{}
|
||||
}
|
||||
|
||||
// AddQuery adds v to Config.IndirectQueries.
|
||||
// Precondition: CanPoint(v.Type().Underlying().(*types.Pointer).Elem()).
|
||||
func (c *Config) AddIndirectQuery(v ssa.Value) {
|
||||
if c.IndirectQueries == nil {
|
||||
c.IndirectQueries = make(map[ssa.Value]struct{})
|
||||
}
|
||||
if !CanPoint(mustDeref(v.Type())) {
|
||||
panic(fmt.Sprintf("%s is not the address of a pointer-like value: %s", v, v.Type()))
|
||||
}
|
||||
c.IndirectQueries[v] = struct{}{}
|
||||
}
|
||||
|
||||
// AddExtendedQuery adds an extended, AST-based query on v to the
|
||||
// analysis. The query, which must be a single Go expression, allows
|
||||
// destructuring the value.
|
||||
//
|
||||
// The query must operate on a variable named 'x', which represents
|
||||
// the value, and result in a pointer-like object. Only a subset of
|
||||
// Go expressions are permitted in queries, namely channel receives,
|
||||
// pointer dereferences, field selectors, array/slice/map/tuple
|
||||
// indexing and grouping with parentheses. The specific indices when
|
||||
// indexing arrays, slices and maps have no significance. Indices used
|
||||
// on tuples must be numeric and within bounds.
|
||||
//
|
||||
// All field selectors must be explicit, even ones usually elided
|
||||
// due to promotion of embedded fields.
|
||||
//
|
||||
// The query 'x' is identical to using AddQuery. The query '*x' is
|
||||
// identical to using AddIndirectQuery.
|
||||
//
|
||||
// On success, AddExtendedQuery returns a Pointer to the queried
|
||||
// value. This Pointer will be initialized during analysis. Using it
|
||||
// before analysis has finished has undefined behavior.
|
||||
//
|
||||
// Example:
|
||||
// // given v, which represents a function call to 'fn() (int, []*T)', and
|
||||
// // 'type T struct { F *int }', the following query will access the field F.
|
||||
// c.AddExtendedQuery(v, "x[1][0].F")
|
||||
func (c *Config) AddExtendedQuery(v ssa.Value, query string) (*Pointer, error) {
|
||||
ops, _, err := parseExtendedQuery(v.Type(), query)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid query %q: %s", query, err)
|
||||
}
|
||||
if c.extendedQueries == nil {
|
||||
c.extendedQueries = make(map[ssa.Value][]*extendedQuery)
|
||||
}
|
||||
|
||||
ptr := &Pointer{}
|
||||
c.extendedQueries[v] = append(c.extendedQueries[v], &extendedQuery{ops: ops, ptr: ptr})
|
||||
return ptr, nil
|
||||
}
|
||||
|
||||
func (c *Config) prog() *ssa.Program {
|
||||
for _, main := range c.Mains {
|
||||
return main.Prog
|
||||
}
|
||||
panic("empty scope")
|
||||
}
|
||||
|
||||
type Warning struct {
|
||||
Pos token.Pos
|
||||
Message string
|
||||
}
|
||||
|
||||
// A Result contains the results of a pointer analysis.
|
||||
//
|
||||
// See Config for how to request the various Result components.
|
||||
//
|
||||
type Result struct {
|
||||
CallGraph *callgraph.Graph // discovered call graph
|
||||
Queries map[ssa.Value]Pointer // pts(v) for each v in Config.Queries.
|
||||
IndirectQueries map[ssa.Value]Pointer // pts(*v) for each v in Config.IndirectQueries.
|
||||
Warnings []Warning // warnings of unsoundness
|
||||
}
|
||||
|
||||
// A Pointer is an equivalence class of pointer-like values.
|
||||
//
|
||||
// A Pointer doesn't have a unique type because pointers of distinct
|
||||
// types may alias the same object.
|
||||
//
|
||||
type Pointer struct {
|
||||
a *analysis
|
||||
n nodeid
|
||||
}
|
||||
|
||||
// A PointsToSet is a set of labels (locations or allocations).
|
||||
type PointsToSet struct {
|
||||
a *analysis // may be nil if pts is nil
|
||||
pts *nodeset
|
||||
}
|
||||
|
||||
func (s PointsToSet) String() string {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteByte('[')
|
||||
if s.pts != nil {
|
||||
var space [50]int
|
||||
for i, l := range s.pts.AppendTo(space[:0]) {
|
||||
if i > 0 {
|
||||
buf.WriteString(", ")
|
||||
}
|
||||
buf.WriteString(s.a.labelFor(nodeid(l)).String())
|
||||
}
|
||||
}
|
||||
buf.WriteByte(']')
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// PointsTo returns the set of labels that this points-to set
|
||||
// contains.
|
||||
func (s PointsToSet) Labels() []*Label {
|
||||
var labels []*Label
|
||||
if s.pts != nil {
|
||||
var space [50]int
|
||||
for _, l := range s.pts.AppendTo(space[:0]) {
|
||||
labels = append(labels, s.a.labelFor(nodeid(l)))
|
||||
}
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// If this PointsToSet came from a Pointer of interface kind
|
||||
// or a reflect.Value, DynamicTypes returns the set of dynamic
|
||||
// types that it may contain. (For an interface, they will
|
||||
// always be concrete types.)
|
||||
//
|
||||
// The result is a mapping whose keys are the dynamic types to which
|
||||
// it may point. For each pointer-like key type, the corresponding
|
||||
// map value is the PointsToSet for pointers of that type.
|
||||
//
|
||||
// The result is empty unless CanHaveDynamicTypes(T).
|
||||
//
|
||||
func (s PointsToSet) DynamicTypes() *typeutil.Map {
|
||||
var tmap typeutil.Map
|
||||
tmap.SetHasher(s.a.hasher)
|
||||
if s.pts != nil {
|
||||
var space [50]int
|
||||
for _, x := range s.pts.AppendTo(space[:0]) {
|
||||
ifaceObjId := nodeid(x)
|
||||
if !s.a.isTaggedObject(ifaceObjId) {
|
||||
continue // !CanHaveDynamicTypes(tDyn)
|
||||
}
|
||||
tDyn, v, indirect := s.a.taggedValue(ifaceObjId)
|
||||
if indirect {
|
||||
panic("indirect tagged object") // implement later
|
||||
}
|
||||
pts, ok := tmap.At(tDyn).(PointsToSet)
|
||||
if !ok {
|
||||
pts = PointsToSet{s.a, new(nodeset)}
|
||||
tmap.Set(tDyn, pts)
|
||||
}
|
||||
pts.pts.addAll(&s.a.nodes[v].solve.pts)
|
||||
}
|
||||
}
|
||||
return &tmap
|
||||
}
|
||||
|
||||
// Intersects reports whether this points-to set and the
|
||||
// argument points-to set contain common members.
|
||||
func (x PointsToSet) Intersects(y PointsToSet) bool {
|
||||
if x.pts == nil || y.pts == nil {
|
||||
return false
|
||||
}
|
||||
// This takes Θ(|x|+|y|) time.
|
||||
var z intsets.Sparse
|
||||
z.Intersection(&x.pts.Sparse, &y.pts.Sparse)
|
||||
return !z.IsEmpty()
|
||||
}
|
||||
|
||||
func (p Pointer) String() string {
|
||||
return fmt.Sprintf("n%d", p.n)
|
||||
}
|
||||
|
||||
// PointsTo returns the points-to set of this pointer.
|
||||
func (p Pointer) PointsTo() PointsToSet {
|
||||
if p.n == 0 {
|
||||
return PointsToSet{}
|
||||
}
|
||||
return PointsToSet{p.a, &p.a.nodes[p.n].solve.pts}
|
||||
}
|
||||
|
||||
// MayAlias reports whether the receiver pointer may alias
|
||||
// the argument pointer.
|
||||
func (p Pointer) MayAlias(q Pointer) bool {
|
||||
return p.PointsTo().Intersects(q.PointsTo())
|
||||
}
|
||||
|
||||
// DynamicTypes returns p.PointsTo().DynamicTypes().
|
||||
func (p Pointer) DynamicTypes() *typeutil.Map {
|
||||
return p.PointsTo().DynamicTypes()
|
||||
}
|
||||
61
vendor/golang.org/x/tools/go/pointer/callgraph.go
generated
vendored
Normal file
61
vendor/golang.org/x/tools/go/pointer/callgraph.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pointer
|
||||
|
||||
// This file defines the internal (context-sensitive) call graph.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
|
||||
"golang.org/x/tools/go/ssa"
|
||||
)
|
||||
|
||||
type cgnode struct {
|
||||
fn *ssa.Function
|
||||
obj nodeid // start of this contour's object block
|
||||
sites []*callsite // ordered list of callsites within this function
|
||||
callersite *callsite // where called from, if known; nil for shared contours
|
||||
}
|
||||
|
||||
// contour returns a description of this node's contour.
|
||||
func (n *cgnode) contour() string {
|
||||
if n.callersite == nil {
|
||||
return "shared contour"
|
||||
}
|
||||
if n.callersite.instr != nil {
|
||||
return fmt.Sprintf("as called from %s", n.callersite.instr.Parent())
|
||||
}
|
||||
return fmt.Sprintf("as called from intrinsic (targets=n%d)", n.callersite.targets)
|
||||
}
|
||||
|
||||
func (n *cgnode) String() string {
|
||||
return fmt.Sprintf("cg%d:%s", n.obj, n.fn)
|
||||
}
|
||||
|
||||
// A callsite represents a single call site within a cgnode;
|
||||
// it is implicitly context-sensitive.
|
||||
// callsites never represent calls to built-ins;
|
||||
// they are handled as intrinsics.
|
||||
//
|
||||
type callsite struct {
|
||||
targets nodeid // pts(·) contains objects for dynamically called functions
|
||||
instr ssa.CallInstruction // the call instruction; nil for synthetic/intrinsic
|
||||
}
|
||||
|
||||
func (c *callsite) String() string {
|
||||
if c.instr != nil {
|
||||
return c.instr.Common().Description()
|
||||
}
|
||||
return "synthetic function call"
|
||||
}
|
||||
|
||||
// pos returns the source position of this callsite, or token.NoPos if implicit.
|
||||
func (c *callsite) pos() token.Pos {
|
||||
if c.instr != nil {
|
||||
return c.instr.Pos()
|
||||
}
|
||||
return token.NoPos
|
||||
}
|
||||
149
vendor/golang.org/x/tools/go/pointer/constraint.go
generated
vendored
Normal file
149
vendor/golang.org/x/tools/go/pointer/constraint.go
generated
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pointer
|
||||
|
||||
import "go/types"
|
||||
|
||||
type constraint interface {
|
||||
// For a complex constraint, returns the nodeid of the pointer
|
||||
// to which it is attached. For addr and copy, returns dst.
|
||||
ptr() nodeid
|
||||
|
||||
// renumber replaces each nodeid n in the constraint by mapping[n].
|
||||
renumber(mapping []nodeid)
|
||||
|
||||
// presolve is a hook for constraint-specific behaviour during
|
||||
// pre-solver optimization. Typical implementations mark as
|
||||
// indirect the set of nodes to which the solver will add copy
|
||||
// edges or PTS labels.
|
||||
presolve(h *hvn)
|
||||
|
||||
// solve is called for complex constraints when the pts for
|
||||
// the node to which they are attached has changed.
|
||||
solve(a *analysis, delta *nodeset)
|
||||
|
||||
String() string
|
||||
}
|
||||
|
||||
// dst = &src
|
||||
// pts(dst) ⊇ {src}
|
||||
// A base constraint used to initialize the solver's pt sets
|
||||
type addrConstraint struct {
|
||||
dst nodeid // (ptr)
|
||||
src nodeid
|
||||
}
|
||||
|
||||
func (c *addrConstraint) ptr() nodeid { return c.dst }
|
||||
func (c *addrConstraint) renumber(mapping []nodeid) {
|
||||
c.dst = mapping[c.dst]
|
||||
c.src = mapping[c.src]
|
||||
}
|
||||
|
||||
// dst = src
|
||||
// A simple constraint represented directly as a copyTo graph edge.
|
||||
type copyConstraint struct {
|
||||
dst nodeid // (ptr)
|
||||
src nodeid
|
||||
}
|
||||
|
||||
func (c *copyConstraint) ptr() nodeid { return c.dst }
|
||||
func (c *copyConstraint) renumber(mapping []nodeid) {
|
||||
c.dst = mapping[c.dst]
|
||||
c.src = mapping[c.src]
|
||||
}
|
||||
|
||||
// dst = src[offset]
|
||||
// A complex constraint attached to src (the pointer)
|
||||
type loadConstraint struct {
|
||||
offset uint32
|
||||
dst nodeid
|
||||
src nodeid // (ptr)
|
||||
}
|
||||
|
||||
func (c *loadConstraint) ptr() nodeid { return c.src }
|
||||
func (c *loadConstraint) renumber(mapping []nodeid) {
|
||||
c.dst = mapping[c.dst]
|
||||
c.src = mapping[c.src]
|
||||
}
|
||||
|
||||
// dst[offset] = src
|
||||
// A complex constraint attached to dst (the pointer)
|
||||
type storeConstraint struct {
|
||||
offset uint32
|
||||
dst nodeid // (ptr)
|
||||
src nodeid
|
||||
}
|
||||
|
||||
func (c *storeConstraint) ptr() nodeid { return c.dst }
|
||||
func (c *storeConstraint) renumber(mapping []nodeid) {
|
||||
c.dst = mapping[c.dst]
|
||||
c.src = mapping[c.src]
|
||||
}
|
||||
|
||||
// dst = &src.f or dst = &src[0]
|
||||
// A complex constraint attached to dst (the pointer)
|
||||
type offsetAddrConstraint struct {
|
||||
offset uint32
|
||||
dst nodeid
|
||||
src nodeid // (ptr)
|
||||
}
|
||||
|
||||
func (c *offsetAddrConstraint) ptr() nodeid { return c.src }
|
||||
func (c *offsetAddrConstraint) renumber(mapping []nodeid) {
|
||||
c.dst = mapping[c.dst]
|
||||
c.src = mapping[c.src]
|
||||
}
|
||||
|
||||
// dst = src.(typ) where typ is an interface
|
||||
// A complex constraint attached to src (the interface).
|
||||
// No representation change: pts(dst) and pts(src) contains tagged objects.
|
||||
type typeFilterConstraint struct {
|
||||
typ types.Type // an interface type
|
||||
dst nodeid
|
||||
src nodeid // (ptr)
|
||||
}
|
||||
|
||||
func (c *typeFilterConstraint) ptr() nodeid { return c.src }
|
||||
func (c *typeFilterConstraint) renumber(mapping []nodeid) {
|
||||
c.dst = mapping[c.dst]
|
||||
c.src = mapping[c.src]
|
||||
}
|
||||
|
||||
// dst = src.(typ) where typ is a concrete type
|
||||
// A complex constraint attached to src (the interface).
|
||||
//
|
||||
// If exact, only tagged objects identical to typ are untagged.
|
||||
// If !exact, tagged objects assignable to typ are untagged too.
|
||||
// The latter is needed for various reflect operators, e.g. Send.
|
||||
//
|
||||
// This entails a representation change:
|
||||
// pts(src) contains tagged objects,
|
||||
// pts(dst) contains their payloads.
|
||||
type untagConstraint struct {
|
||||
typ types.Type // a concrete type
|
||||
dst nodeid
|
||||
src nodeid // (ptr)
|
||||
exact bool
|
||||
}
|
||||
|
||||
func (c *untagConstraint) ptr() nodeid { return c.src }
|
||||
func (c *untagConstraint) renumber(mapping []nodeid) {
|
||||
c.dst = mapping[c.dst]
|
||||
c.src = mapping[c.src]
|
||||
}
|
||||
|
||||
// src.method(params...)
|
||||
// A complex constraint attached to iface.
|
||||
type invokeConstraint struct {
|
||||
method *types.Func // the abstract method
|
||||
iface nodeid // (ptr) the interface
|
||||
params nodeid // the start of the identity/params/results block
|
||||
}
|
||||
|
||||
func (c *invokeConstraint) ptr() nodeid { return c.iface }
|
||||
func (c *invokeConstraint) renumber(mapping []nodeid) {
|
||||
c.iface = mapping[c.iface]
|
||||
c.params = mapping[c.params]
|
||||
}
|
||||
610
vendor/golang.org/x/tools/go/pointer/doc.go
generated
vendored
Normal file
610
vendor/golang.org/x/tools/go/pointer/doc.go
generated
vendored
Normal file
@@ -0,0 +1,610 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
|
||||
Package pointer implements Andersen's analysis, an inclusion-based
|
||||
pointer analysis algorithm first described in (Andersen, 1994).
|
||||
|
||||
A pointer analysis relates every pointer expression in a whole program
|
||||
to the set of memory locations to which it might point. This
|
||||
information can be used to construct a call graph of the program that
|
||||
precisely represents the destinations of dynamic function and method
|
||||
calls. It can also be used to determine, for example, which pairs of
|
||||
channel operations operate on the same channel.
|
||||
|
||||
The package allows the client to request a set of expressions of
|
||||
interest for which the points-to information will be returned once the
|
||||
analysis is complete. In addition, the client may request that a
|
||||
callgraph is constructed. The example program in example_test.go
|
||||
demonstrates both of these features. Clients should not request more
|
||||
information than they need since it may increase the cost of the
|
||||
analysis significantly.
|
||||
|
||||
|
||||
CLASSIFICATION
|
||||
|
||||
Our algorithm is INCLUSION-BASED: the points-to sets for x and y will
|
||||
be related by pts(y) ⊇ pts(x) if the program contains the statement
|
||||
y = x.
|
||||
|
||||
It is FLOW-INSENSITIVE: it ignores all control flow constructs and the
|
||||
order of statements in a program. It is therefore a "MAY ALIAS"
|
||||
analysis: its facts are of the form "P may/may not point to L",
|
||||
not "P must point to L".
|
||||
|
||||
It is FIELD-SENSITIVE: it builds separate points-to sets for distinct
|
||||
fields, such as x and y in struct { x, y *int }.
|
||||
|
||||
It is mostly CONTEXT-INSENSITIVE: most functions are analyzed once,
|
||||
so values can flow in at one call to the function and return out at
|
||||
another. Only some smaller functions are analyzed with consideration
|
||||
of their calling context.
|
||||
|
||||
It has a CONTEXT-SENSITIVE HEAP: objects are named by both allocation
|
||||
site and context, so the objects returned by two distinct calls to f:
|
||||
func f() *T { return new(T) }
|
||||
are distinguished up to the limits of the calling context.
|
||||
|
||||
It is a WHOLE PROGRAM analysis: it requires SSA-form IR for the
|
||||
complete Go program and summaries for native code.
|
||||
|
||||
See the (Hind, PASTE'01) survey paper for an explanation of these terms.
|
||||
|
||||
|
||||
SOUNDNESS
|
||||
|
||||
The analysis is fully sound when invoked on pure Go programs that do not
|
||||
use reflection or unsafe.Pointer conversions. In other words, if there
|
||||
is any possible execution of the program in which pointer P may point to
|
||||
object O, the analysis will report that fact.
|
||||
|
||||
|
||||
REFLECTION
|
||||
|
||||
By default, the "reflect" library is ignored by the analysis, as if all
|
||||
its functions were no-ops, but if the client enables the Reflection flag,
|
||||
the analysis will make a reasonable attempt to model the effects of
|
||||
calls into this library. However, this comes at a significant
|
||||
performance cost, and not all features of that library are yet
|
||||
implemented. In addition, some simplifying approximations must be made
|
||||
to ensure that the analysis terminates; for example, reflection can be
|
||||
used to construct an infinite set of types and values of those types,
|
||||
but the analysis arbitrarily bounds the depth of such types.
|
||||
|
||||
Most but not all reflection operations are supported.
|
||||
In particular, addressable reflect.Values are not yet implemented, so
|
||||
operations such as (reflect.Value).Set have no analytic effect.
|
||||
|
||||
|
||||
UNSAFE POINTER CONVERSIONS
|
||||
|
||||
The pointer analysis makes no attempt to understand aliasing between the
|
||||
operand x and result y of an unsafe.Pointer conversion:
|
||||
y = (*T)(unsafe.Pointer(x))
|
||||
It is as if the conversion allocated an entirely new object:
|
||||
y = new(T)
|
||||
|
||||
|
||||
NATIVE CODE
|
||||
|
||||
The analysis cannot model the aliasing effects of functions written in
|
||||
languages other than Go, such as runtime intrinsics in C or assembly, or
|
||||
code accessed via cgo. The result is as if such functions are no-ops.
|
||||
However, various important intrinsics are understood by the analysis,
|
||||
along with built-ins such as append.
|
||||
|
||||
The analysis currently provides no way for users to specify the aliasing
|
||||
effects of native code.
|
||||
|
||||
------------------------------------------------------------------------
|
||||
|
||||
IMPLEMENTATION
|
||||
|
||||
The remaining documentation is intended for package maintainers and
|
||||
pointer analysis specialists. Maintainers should have a solid
|
||||
understanding of the referenced papers (especially those by H&L and PKH)
|
||||
before making making significant changes.
|
||||
|
||||
The implementation is similar to that described in (Pearce et al,
|
||||
PASTE'04). Unlike many algorithms which interleave constraint
|
||||
generation and solving, constructing the callgraph as they go, this
|
||||
implementation for the most part observes a phase ordering (generation
|
||||
before solving), with only simple (copy) constraints being generated
|
||||
during solving. (The exception is reflection, which creates various
|
||||
constraints during solving as new types flow to reflect.Value
|
||||
operations.) This improves the traction of presolver optimisations,
|
||||
but imposes certain restrictions, e.g. potential context sensitivity
|
||||
is limited since all variants must be created a priori.
|
||||
|
||||
|
||||
TERMINOLOGY
|
||||
|
||||
A type is said to be "pointer-like" if it is a reference to an object.
|
||||
Pointer-like types include pointers and also interfaces, maps, channels,
|
||||
functions and slices.
|
||||
|
||||
We occasionally use C's x->f notation to distinguish the case where x
|
||||
is a struct pointer from x.f where is a struct value.
|
||||
|
||||
Pointer analysis literature (and our comments) often uses the notation
|
||||
dst=*src+offset to mean something different than what it means in Go.
|
||||
It means: for each node index p in pts(src), the node index p+offset is
|
||||
in pts(dst). Similarly *dst+offset=src is used for store constraints
|
||||
and dst=src+offset for offset-address constraints.
|
||||
|
||||
|
||||
NODES
|
||||
|
||||
Nodes are the key datastructure of the analysis, and have a dual role:
|
||||
they represent both constraint variables (equivalence classes of
|
||||
pointers) and members of points-to sets (things that can be pointed
|
||||
at, i.e. "labels").
|
||||
|
||||
Nodes are naturally numbered. The numbering enables compact
|
||||
representations of sets of nodes such as bitvectors (or BDDs); and the
|
||||
ordering enables a very cheap way to group related nodes together. For
|
||||
example, passing n parameters consists of generating n parallel
|
||||
constraints from caller+i to callee+i for 0<=i<n.
|
||||
|
||||
The zero nodeid means "not a pointer". For simplicity, we generate flow
|
||||
constraints even for non-pointer types such as int. The pointer
|
||||
equivalence (PE) presolver optimization detects which variables cannot
|
||||
point to anything; this includes not only all variables of non-pointer
|
||||
types (such as int) but also variables of pointer-like types if they are
|
||||
always nil, or are parameters to a function that is never called.
|
||||
|
||||
Each node represents a scalar part of a value or object.
|
||||
Aggregate types (structs, tuples, arrays) are recursively flattened
|
||||
out into a sequential list of scalar component types, and all the
|
||||
elements of an array are represented by a single node. (The
|
||||
flattening of a basic type is a list containing a single node.)
|
||||
|
||||
Nodes are connected into a graph with various kinds of labelled edges:
|
||||
simple edges (or copy constraints) represent value flow. Complex
|
||||
edges (load, store, etc) trigger the creation of new simple edges
|
||||
during the solving phase.
|
||||
|
||||
|
||||
OBJECTS
|
||||
|
||||
Conceptually, an "object" is a contiguous sequence of nodes denoting
|
||||
an addressable location: something that a pointer can point to. The
|
||||
first node of an object has a non-nil obj field containing information
|
||||
about the allocation: its size, context, and ssa.Value.
|
||||
|
||||
Objects include:
|
||||
- functions and globals;
|
||||
- variable allocations in the stack frame or heap;
|
||||
- maps, channels and slices created by calls to make();
|
||||
- allocations to construct an interface;
|
||||
- allocations caused by conversions, e.g. []byte(str).
|
||||
- arrays allocated by calls to append();
|
||||
|
||||
Many objects have no Go types. For example, the func, map and chan type
|
||||
kinds in Go are all varieties of pointers, but their respective objects
|
||||
are actual functions (executable code), maps (hash tables), and channels
|
||||
(synchronized queues). Given the way we model interfaces, they too are
|
||||
pointers to "tagged" objects with no Go type. And an *ssa.Global denotes
|
||||
the address of a global variable, but the object for a Global is the
|
||||
actual data. So, the types of an ssa.Value that creates an object is
|
||||
"off by one indirection": a pointer to the object.
|
||||
|
||||
The individual nodes of an object are sometimes referred to as "labels".
|
||||
|
||||
For uniformity, all objects have a non-zero number of fields, even those
|
||||
of the empty type struct{}. (All arrays are treated as if of length 1,
|
||||
so there are no empty arrays. The empty tuple is never address-taken,
|
||||
so is never an object.)
|
||||
|
||||
|
||||
TAGGED OBJECTS
|
||||
|
||||
An tagged object has the following layout:
|
||||
|
||||
T -- obj.flags ⊇ {otTagged}
|
||||
v
|
||||
...
|
||||
|
||||
The T node's typ field is the dynamic type of the "payload": the value
|
||||
v which follows, flattened out. The T node's obj has the otTagged
|
||||
flag.
|
||||
|
||||
Tagged objects are needed when generalizing across types: interfaces,
|
||||
reflect.Values, reflect.Types. Each of these three types is modelled
|
||||
as a pointer that exclusively points to tagged objects.
|
||||
|
||||
Tagged objects may be indirect (obj.flags ⊇ {otIndirect}) meaning that
|
||||
the value v is not of type T but *T; this is used only for
|
||||
reflect.Values that represent lvalues. (These are not implemented yet.)
|
||||
|
||||
|
||||
ANALYSIS ABSTRACTION OF EACH TYPE
|
||||
|
||||
Variables of the following "scalar" types may be represented by a
|
||||
single node: basic types, pointers, channels, maps, slices, 'func'
|
||||
pointers, interfaces.
|
||||
|
||||
Pointers
|
||||
Nothing to say here, oddly.
|
||||
|
||||
Basic types (bool, string, numbers, unsafe.Pointer)
|
||||
Currently all fields in the flattening of a type, including
|
||||
non-pointer basic types such as int, are represented in objects and
|
||||
values. Though non-pointer nodes within values are uninteresting,
|
||||
non-pointer nodes in objects may be useful (if address-taken)
|
||||
because they permit the analysis to deduce, in this example,
|
||||
|
||||
var s struct{ ...; x int; ... }
|
||||
p := &s.x
|
||||
|
||||
that p points to s.x. If we ignored such object fields, we could only
|
||||
say that p points somewhere within s.
|
||||
|
||||
All other basic types are ignored. Expressions of these types have
|
||||
zero nodeid, and fields of these types within aggregate other types
|
||||
are omitted.
|
||||
|
||||
unsafe.Pointers are not modelled as pointers, so a conversion of an
|
||||
unsafe.Pointer to *T is (unsoundly) treated equivalent to new(T).
|
||||
|
||||
Channels
|
||||
An expression of type 'chan T' is a kind of pointer that points
|
||||
exclusively to channel objects, i.e. objects created by MakeChan (or
|
||||
reflection).
|
||||
|
||||
'chan T' is treated like *T.
|
||||
*ssa.MakeChan is treated as equivalent to new(T).
|
||||
*ssa.Send and receive (*ssa.UnOp(ARROW)) and are equivalent to store
|
||||
and load.
|
||||
|
||||
Maps
|
||||
An expression of type 'map[K]V' is a kind of pointer that points
|
||||
exclusively to map objects, i.e. objects created by MakeMap (or
|
||||
reflection).
|
||||
|
||||
map K[V] is treated like *M where M = struct{k K; v V}.
|
||||
*ssa.MakeMap is equivalent to new(M).
|
||||
*ssa.MapUpdate is equivalent to *y=x where *y and x have type M.
|
||||
*ssa.Lookup is equivalent to y=x.v where x has type *M.
|
||||
|
||||
Slices
|
||||
A slice []T, which dynamically resembles a struct{array *T, len, cap int},
|
||||
is treated as if it were just a *T pointer; the len and cap fields are
|
||||
ignored.
|
||||
|
||||
*ssa.MakeSlice is treated like new([1]T): an allocation of a
|
||||
singleton array.
|
||||
*ssa.Index on a slice is equivalent to a load.
|
||||
*ssa.IndexAddr on a slice returns the address of the sole element of the
|
||||
slice, i.e. the same address.
|
||||
*ssa.Slice is treated as a simple copy.
|
||||
|
||||
Functions
|
||||
An expression of type 'func...' is a kind of pointer that points
|
||||
exclusively to function objects.
|
||||
|
||||
A function object has the following layout:
|
||||
|
||||
identity -- typ:*types.Signature; obj.flags ⊇ {otFunction}
|
||||
params_0 -- (the receiver, if a method)
|
||||
...
|
||||
params_n-1
|
||||
results_0
|
||||
...
|
||||
results_m-1
|
||||
|
||||
There may be multiple function objects for the same *ssa.Function
|
||||
due to context-sensitive treatment of some functions.
|
||||
|
||||
The first node is the function's identity node.
|
||||
Associated with every callsite is a special "targets" variable,
|
||||
whose pts() contains the identity node of each function to which
|
||||
the call may dispatch. Identity words are not otherwise used during
|
||||
the analysis, but we construct the call graph from the pts()
|
||||
solution for such nodes.
|
||||
|
||||
The following block of contiguous nodes represents the flattened-out
|
||||
types of the parameters ("P-block") and results ("R-block") of the
|
||||
function object.
|
||||
|
||||
The treatment of free variables of closures (*ssa.FreeVar) is like
|
||||
that of global variables; it is not context-sensitive.
|
||||
*ssa.MakeClosure instructions create copy edges to Captures.
|
||||
|
||||
A Go value of type 'func' (i.e. a pointer to one or more functions)
|
||||
is a pointer whose pts() contains function objects. The valueNode()
|
||||
for an *ssa.Function returns a singleton for that function.
|
||||
|
||||
Interfaces
|
||||
An expression of type 'interface{...}' is a kind of pointer that
|
||||
points exclusively to tagged objects. All tagged objects pointed to
|
||||
by an interface are direct (the otIndirect flag is clear) and
|
||||
concrete (the tag type T is not itself an interface type). The
|
||||
associated ssa.Value for an interface's tagged objects may be an
|
||||
*ssa.MakeInterface instruction, or nil if the tagged object was
|
||||
created by an instrinsic (e.g. reflection).
|
||||
|
||||
Constructing an interface value causes generation of constraints for
|
||||
all of the concrete type's methods; we can't tell a priori which
|
||||
ones may be called.
|
||||
|
||||
TypeAssert y = x.(T) is implemented by a dynamic constraint
|
||||
triggered by each tagged object O added to pts(x): a typeFilter
|
||||
constraint if T is an interface type, or an untag constraint if T is
|
||||
a concrete type. A typeFilter tests whether O.typ implements T; if
|
||||
so, O is added to pts(y). An untagFilter tests whether O.typ is
|
||||
assignable to T,and if so, a copy edge O.v -> y is added.
|
||||
|
||||
ChangeInterface is a simple copy because the representation of
|
||||
tagged objects is independent of the interface type (in contrast
|
||||
to the "method tables" approach used by the gc runtime).
|
||||
|
||||
y := Invoke x.m(...) is implemented by allocating contiguous P/R
|
||||
blocks for the callsite and adding a dynamic rule triggered by each
|
||||
tagged object added to pts(x). The rule adds param/results copy
|
||||
edges to/from each discovered concrete method.
|
||||
|
||||
(Q. Why do we model an interface as a pointer to a pair of type and
|
||||
value, rather than as a pair of a pointer to type and a pointer to
|
||||
value?
|
||||
A. Control-flow joins would merge interfaces ({T1}, {V1}) and ({T2},
|
||||
{V2}) to make ({T1,T2}, {V1,V2}), leading to the infeasible and
|
||||
type-unsafe combination (T1,V2). Treating the value and its concrete
|
||||
type as inseparable makes the analysis type-safe.)
|
||||
|
||||
reflect.Value
|
||||
A reflect.Value is modelled very similarly to an interface{}, i.e. as
|
||||
a pointer exclusively to tagged objects, but with two generalizations.
|
||||
|
||||
1) a reflect.Value that represents an lvalue points to an indirect
|
||||
(obj.flags ⊇ {otIndirect}) tagged object, which has a similar
|
||||
layout to a tagged object except that the value is a pointer to
|
||||
the dynamic type. Indirect tagged objects preserve the correct
|
||||
aliasing so that mutations made by (reflect.Value).Set can be
|
||||
observed.
|
||||
|
||||
Indirect objects only arise when an lvalue is derived from an
|
||||
rvalue by indirection, e.g. the following code:
|
||||
|
||||
type S struct { X T }
|
||||
var s S
|
||||
var i interface{} = &s // i points to a *S-tagged object (from MakeInterface)
|
||||
v1 := reflect.ValueOf(i) // v1 points to same *S-tagged object as i
|
||||
v2 := v1.Elem() // v2 points to an indirect S-tagged object, pointing to s
|
||||
v3 := v2.FieldByName("X") // v3 points to an indirect int-tagged object, pointing to s.X
|
||||
v3.Set(y) // pts(s.X) ⊇ pts(y)
|
||||
|
||||
Whether indirect or not, the concrete type of the tagged object
|
||||
corresponds to the user-visible dynamic type, and the existence
|
||||
of a pointer is an implementation detail.
|
||||
|
||||
(NB: indirect tagged objects are not yet implemented)
|
||||
|
||||
2) The dynamic type tag of a tagged object pointed to by a
|
||||
reflect.Value may be an interface type; it need not be concrete.
|
||||
|
||||
This arises in code such as this:
|
||||
tEface := reflect.TypeOf(new(interface{})).Elem() // interface{}
|
||||
eface := reflect.Zero(tEface)
|
||||
pts(eface) is a singleton containing an interface{}-tagged
|
||||
object. That tagged object's payload is an interface{} value,
|
||||
i.e. the pts of the payload contains only concrete-tagged
|
||||
objects, although in this example it's the zero interface{} value,
|
||||
so its pts is empty.
|
||||
|
||||
reflect.Type
|
||||
Just as in the real "reflect" library, we represent a reflect.Type
|
||||
as an interface whose sole implementation is the concrete type,
|
||||
*reflect.rtype. (This choice is forced on us by go/types: clients
|
||||
cannot fabricate types with arbitrary method sets.)
|
||||
|
||||
rtype instances are canonical: there is at most one per dynamic
|
||||
type. (rtypes are in fact large structs but since identity is all
|
||||
that matters, we represent them by a single node.)
|
||||
|
||||
The payload of each *rtype-tagged object is an *rtype pointer that
|
||||
points to exactly one such canonical rtype object. We exploit this
|
||||
by setting the node.typ of the payload to the dynamic type, not
|
||||
'*rtype'. This saves us an indirection in each resolution rule. As
|
||||
an optimisation, *rtype-tagged objects are canonicalized too.
|
||||
|
||||
|
||||
Aggregate types:
|
||||
|
||||
Aggregate types are treated as if all directly contained
|
||||
aggregates are recursively flattened out.
|
||||
|
||||
Structs
|
||||
*ssa.Field y = x.f creates a simple edge to y from x's node at f's offset.
|
||||
|
||||
*ssa.FieldAddr y = &x->f requires a dynamic closure rule to create
|
||||
simple edges for each struct discovered in pts(x).
|
||||
|
||||
The nodes of a struct consist of a special 'identity' node (whose
|
||||
type is that of the struct itself), followed by the nodes for all
|
||||
the struct's fields, recursively flattened out. A pointer to the
|
||||
struct is a pointer to its identity node. That node allows us to
|
||||
distinguish a pointer to a struct from a pointer to its first field.
|
||||
|
||||
Field offsets are logical field offsets (plus one for the identity
|
||||
node), so the sizes of the fields can be ignored by the analysis.
|
||||
|
||||
(The identity node is non-traditional but enables the distinction
|
||||
described above, which is valuable for code comprehension tools.
|
||||
Typical pointer analyses for C, whose purpose is compiler
|
||||
optimization, must soundly model unsafe.Pointer (void*) conversions,
|
||||
and this requires fidelity to the actual memory layout using physical
|
||||
field offsets.)
|
||||
|
||||
*ssa.Field y = x.f creates a simple edge to y from x's node at f's offset.
|
||||
|
||||
*ssa.FieldAddr y = &x->f requires a dynamic closure rule to create
|
||||
simple edges for each struct discovered in pts(x).
|
||||
|
||||
Arrays
|
||||
We model an array by an identity node (whose type is that of the
|
||||
array itself) followed by a node representing all the elements of
|
||||
the array; the analysis does not distinguish elements with different
|
||||
indices. Effectively, an array is treated like struct{elem T}, a
|
||||
load y=x[i] like y=x.elem, and a store x[i]=y like x.elem=y; the
|
||||
index i is ignored.
|
||||
|
||||
A pointer to an array is pointer to its identity node. (A slice is
|
||||
also a pointer to an array's identity node.) The identity node
|
||||
allows us to distinguish a pointer to an array from a pointer to one
|
||||
of its elements, but it is rather costly because it introduces more
|
||||
offset constraints into the system. Furthermore, sound treatment of
|
||||
unsafe.Pointer would require us to dispense with this node.
|
||||
|
||||
Arrays may be allocated by Alloc, by make([]T), by calls to append,
|
||||
and via reflection.
|
||||
|
||||
Tuples (T, ...)
|
||||
Tuples are treated like structs with naturally numbered fields.
|
||||
*ssa.Extract is analogous to *ssa.Field.
|
||||
|
||||
However, tuples have no identity field since by construction, they
|
||||
cannot be address-taken.
|
||||
|
||||
|
||||
FUNCTION CALLS
|
||||
|
||||
There are three kinds of function call:
|
||||
(1) static "call"-mode calls of functions.
|
||||
(2) dynamic "call"-mode calls of functions.
|
||||
(3) dynamic "invoke"-mode calls of interface methods.
|
||||
Cases 1 and 2 apply equally to methods and standalone functions.
|
||||
|
||||
Static calls.
|
||||
A static call consists three steps:
|
||||
- finding the function object of the callee;
|
||||
- creating copy edges from the actual parameter value nodes to the
|
||||
P-block in the function object (this includes the receiver if
|
||||
the callee is a method);
|
||||
- creating copy edges from the R-block in the function object to
|
||||
the value nodes for the result of the call.
|
||||
|
||||
A static function call is little more than two struct value copies
|
||||
between the P/R blocks of caller and callee:
|
||||
|
||||
callee.P = caller.P
|
||||
caller.R = callee.R
|
||||
|
||||
Context sensitivity
|
||||
|
||||
Static calls (alone) may be treated context sensitively,
|
||||
i.e. each callsite may cause a distinct re-analysis of the
|
||||
callee, improving precision. Our current context-sensitivity
|
||||
policy treats all intrinsics and getter/setter methods in this
|
||||
manner since such functions are small and seem like an obvious
|
||||
source of spurious confluences, though this has not yet been
|
||||
evaluated.
|
||||
|
||||
Dynamic function calls
|
||||
|
||||
Dynamic calls work in a similar manner except that the creation of
|
||||
copy edges occurs dynamically, in a similar fashion to a pair of
|
||||
struct copies in which the callee is indirect:
|
||||
|
||||
callee->P = caller.P
|
||||
caller.R = callee->R
|
||||
|
||||
(Recall that the function object's P- and R-blocks are contiguous.)
|
||||
|
||||
Interface method invocation
|
||||
|
||||
For invoke-mode calls, we create a params/results block for the
|
||||
callsite and attach a dynamic closure rule to the interface. For
|
||||
each new tagged object that flows to the interface, we look up
|
||||
the concrete method, find its function object, and connect its P/R
|
||||
blocks to the callsite's P/R blocks, adding copy edges to the graph
|
||||
during solving.
|
||||
|
||||
Recording call targets
|
||||
|
||||
The analysis notifies its clients of each callsite it encounters,
|
||||
passing a CallSite interface. Among other things, the CallSite
|
||||
contains a synthetic constraint variable ("targets") whose
|
||||
points-to solution includes the set of all function objects to
|
||||
which the call may dispatch.
|
||||
|
||||
It is via this mechanism that the callgraph is made available.
|
||||
Clients may also elect to be notified of callgraph edges directly;
|
||||
internally this just iterates all "targets" variables' pts(·)s.
|
||||
|
||||
|
||||
PRESOLVER
|
||||
|
||||
We implement Hash-Value Numbering (HVN), a pre-solver constraint
|
||||
optimization described in Hardekopf & Lin, SAS'07. This is documented
|
||||
in more detail in hvn.go. We intend to add its cousins HR and HU in
|
||||
future.
|
||||
|
||||
|
||||
SOLVER
|
||||
|
||||
The solver is currently a naive Andersen-style implementation; it does
|
||||
not perform online cycle detection, though we plan to add solver
|
||||
optimisations such as Hybrid- and Lazy- Cycle Detection from (Hardekopf
|
||||
& Lin, PLDI'07).
|
||||
|
||||
It uses difference propagation (Pearce et al, SQC'04) to avoid
|
||||
redundant re-triggering of closure rules for values already seen.
|
||||
|
||||
Points-to sets are represented using sparse bit vectors (similar to
|
||||
those used in LLVM and gcc), which are more space- and time-efficient
|
||||
than sets based on Go's built-in map type or dense bit vectors.
|
||||
|
||||
Nodes are permuted prior to solving so that object nodes (which may
|
||||
appear in points-to sets) are lower numbered than non-object (var)
|
||||
nodes. This improves the density of the set over which the PTSs
|
||||
range, and thus the efficiency of the representation.
|
||||
|
||||
Partly thanks to avoiding map iteration, the execution of the solver is
|
||||
100% deterministic, a great help during debugging.
|
||||
|
||||
|
||||
FURTHER READING
|
||||
|
||||
Andersen, L. O. 1994. Program analysis and specialization for the C
|
||||
programming language. Ph.D. dissertation. DIKU, University of
|
||||
Copenhagen.
|
||||
|
||||
David J. Pearce, Paul H. J. Kelly, and Chris Hankin. 2004. Efficient
|
||||
field-sensitive pointer analysis for C. In Proceedings of the 5th ACM
|
||||
SIGPLAN-SIGSOFT workshop on Program analysis for software tools and
|
||||
engineering (PASTE '04). ACM, New York, NY, USA, 37-42.
|
||||
http://doi.acm.org/10.1145/996821.996835
|
||||
|
||||
David J. Pearce, Paul H. J. Kelly, and Chris Hankin. 2004. Online
|
||||
Cycle Detection and Difference Propagation: Applications to Pointer
|
||||
Analysis. Software Quality Control 12, 4 (December 2004), 311-337.
|
||||
http://dx.doi.org/10.1023/B:SQJO.0000039791.93071.a2
|
||||
|
||||
David Grove and Craig Chambers. 2001. A framework for call graph
|
||||
construction algorithms. ACM Trans. Program. Lang. Syst. 23, 6
|
||||
(November 2001), 685-746.
|
||||
http://doi.acm.org/10.1145/506315.506316
|
||||
|
||||
Ben Hardekopf and Calvin Lin. 2007. The ant and the grasshopper: fast
|
||||
and accurate pointer analysis for millions of lines of code. In
|
||||
Proceedings of the 2007 ACM SIGPLAN conference on Programming language
|
||||
design and implementation (PLDI '07). ACM, New York, NY, USA, 290-299.
|
||||
http://doi.acm.org/10.1145/1250734.1250767
|
||||
|
||||
Ben Hardekopf and Calvin Lin. 2007. Exploiting pointer and location
|
||||
equivalence to optimize pointer analysis. In Proceedings of the 14th
|
||||
international conference on Static Analysis (SAS'07), Hanne Riis
|
||||
Nielson and Gilberto Filé (Eds.). Springer-Verlag, Berlin, Heidelberg,
|
||||
265-280.
|
||||
|
||||
Atanas Rountev and Satish Chandra. 2000. Off-line variable substitution
|
||||
for scaling points-to analysis. In Proceedings of the ACM SIGPLAN 2000
|
||||
conference on Programming language design and implementation (PLDI '00).
|
||||
ACM, New York, NY, USA, 47-56. DOI=10.1145/349299.349310
|
||||
http://doi.acm.org/10.1145/349299.349310
|
||||
|
||||
*/
|
||||
package pointer // import "golang.org/x/tools/go/pointer"
|
||||
1325
vendor/golang.org/x/tools/go/pointer/gen.go
generated
vendored
Normal file
1325
vendor/golang.org/x/tools/go/pointer/gen.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
973
vendor/golang.org/x/tools/go/pointer/hvn.go
generated
vendored
Normal file
973
vendor/golang.org/x/tools/go/pointer/hvn.go
generated
vendored
Normal file
@@ -0,0 +1,973 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pointer
|
||||
|
||||
// This file implements Hash-Value Numbering (HVN), a pre-solver
|
||||
// constraint optimization described in Hardekopf & Lin, SAS'07 (see
|
||||
// doc.go) that analyses the graph topology to determine which sets of
|
||||
// variables are "pointer equivalent" (PE), i.e. must have identical
|
||||
// points-to sets in the solution.
|
||||
//
|
||||
// A separate ("offline") graph is constructed. Its nodes are those of
|
||||
// the main-graph, plus an additional node *X for each pointer node X.
|
||||
// With this graph we can reason about the unknown points-to set of
|
||||
// dereferenced pointers. (We do not generalize this to represent
|
||||
// unknown fields x->f, perhaps because such fields would be numerous,
|
||||
// though it might be worth an experiment.)
|
||||
//
|
||||
// Nodes whose points-to relations are not entirely captured by the
|
||||
// graph are marked as "indirect": the *X nodes, the parameters of
|
||||
// address-taken functions (which includes all functions in method
|
||||
// sets), or nodes updated by the solver rules for reflection, etc.
|
||||
//
|
||||
// All addr (y=&x) nodes are initially assigned a pointer-equivalence
|
||||
// (PE) label equal to x's nodeid in the main graph. (These are the
|
||||
// only PE labels that are less than len(a.nodes).)
|
||||
//
|
||||
// All offsetAddr (y=&x.f) constraints are initially assigned a PE
|
||||
// label; such labels are memoized, keyed by (x, f), so that equivalent
|
||||
// nodes y are assigned the same label.
|
||||
//
|
||||
// Then we process each strongly connected component (SCC) of the graph
|
||||
// in topological order, assigning it a PE label based on the set P of
|
||||
// PE labels that flow to it from its immediate dependencies.
|
||||
//
|
||||
// If any node in P is "indirect", the entire SCC is assigned a fresh PE
|
||||
// label. Otherwise:
|
||||
//
|
||||
// |P|=0 if P is empty, all nodes in the SCC are non-pointers (e.g.
|
||||
// uninitialized variables, or formal params of dead functions)
|
||||
// and the SCC is assigned the PE label of zero.
|
||||
//
|
||||
// |P|=1 if P is a singleton, the SCC is assigned the same label as the
|
||||
// sole element of P.
|
||||
//
|
||||
// |P|>1 if P contains multiple labels, a unique label representing P is
|
||||
// invented and recorded in a hash table, so that other
|
||||
// equivalent SCCs may also be assigned this label, akin to
|
||||
// conventional hash-value numbering in a compiler.
|
||||
//
|
||||
// Finally, a renumbering is computed such that each node is replaced by
|
||||
// the lowest-numbered node with the same PE label. All constraints are
|
||||
// renumbered, and any resulting duplicates are eliminated.
|
||||
//
|
||||
// The only nodes that are not renumbered are the objects x in addr
|
||||
// (y=&x) constraints, since the ids of these nodes (and fields derived
|
||||
// from them via offsetAddr rules) are the elements of all points-to
|
||||
// sets, so they must remain as they are if we want the same solution.
|
||||
//
|
||||
// The solverStates (node.solve) for nodes in the same equivalence class
|
||||
// are linked together so that all nodes in the class have the same
|
||||
// solution. This avoids the need to renumber nodeids buried in
|
||||
// Queries, cgnodes, etc (like (*analysis).renumber() does) since only
|
||||
// the solution is needed.
|
||||
//
|
||||
// The result of HVN is that the number of distinct nodes and
|
||||
// constraints is reduced, but the solution is identical (almost---see
|
||||
// CROSS-CHECK below). In particular, both linear and cyclic chains of
|
||||
// copies are each replaced by a single node.
|
||||
//
|
||||
// Nodes and constraints created "online" (e.g. while solving reflection
|
||||
// constraints) are not subject to this optimization.
|
||||
//
|
||||
// PERFORMANCE
|
||||
//
|
||||
// In two benchmarks (guru and godoc), HVN eliminates about two thirds
|
||||
// of nodes, the majority accounted for by non-pointers: nodes of
|
||||
// non-pointer type, pointers that remain nil, formal parameters of dead
|
||||
// functions, nodes of untracked types, etc. It also reduces the number
|
||||
// of constraints, also by about two thirds, and the solving time by
|
||||
// 30--42%, although we must pay about 15% for the running time of HVN
|
||||
// itself. The benefit is greater for larger applications.
|
||||
//
|
||||
// There are many possible optimizations to improve the performance:
|
||||
// * Use fewer than 1:1 onodes to main graph nodes: many of the onodes
|
||||
// we create are not needed.
|
||||
// * HU (HVN with Union---see paper): coalesce "union" peLabels when
|
||||
// their expanded-out sets are equal.
|
||||
// * HR (HVN with deReference---see paper): this will require that we
|
||||
// apply HVN until fixed point, which may need more bookkeeping of the
|
||||
// correspondence of main nodes to onodes.
|
||||
// * Location Equivalence (see paper): have points-to sets contain not
|
||||
// locations but location-equivalence class labels, each representing
|
||||
// a set of locations.
|
||||
// * HVN with field-sensitive ref: model each of the fields of a
|
||||
// pointer-to-struct.
|
||||
//
|
||||
// CROSS-CHECK
|
||||
//
|
||||
// To verify the soundness of the optimization, when the
|
||||
// debugHVNCrossCheck option is enabled, we run the solver twice, once
|
||||
// before and once after running HVN, dumping the solution to disk, and
|
||||
// then we compare the results. If they are not identical, the analysis
|
||||
// panics.
|
||||
//
|
||||
// The solution dumped to disk includes only the N*N submatrix of the
|
||||
// complete solution where N is the number of nodes after generation.
|
||||
// In other words, we ignore pointer variables and objects created by
|
||||
// the solver itself, since their numbering depends on the solver order,
|
||||
// which is affected by the optimization. In any case, that's the only
|
||||
// part the client cares about.
|
||||
//
|
||||
// The cross-check is too strict and may fail spuriously. Although the
|
||||
// H&L paper describing HVN states that the solutions obtained should be
|
||||
// identical, this is not the case in practice because HVN can collapse
|
||||
// cycles involving *p even when pts(p)={}. Consider this example
|
||||
// distilled from testdata/hello.go:
|
||||
//
|
||||
// var x T
|
||||
// func f(p **T) {
|
||||
// t0 = *p
|
||||
// ...
|
||||
// t1 = φ(t0, &x)
|
||||
// *p = t1
|
||||
// }
|
||||
//
|
||||
// If f is dead code, we get:
|
||||
// unoptimized: pts(p)={} pts(t0)={} pts(t1)={&x}
|
||||
// optimized: pts(p)={} pts(t0)=pts(t1)=pts(*p)={&x}
|
||||
//
|
||||
// It's hard to argue that this is a bug: the result is sound and the
|
||||
// loss of precision is inconsequential---f is dead code, after all.
|
||||
// But unfortunately it limits the usefulness of the cross-check since
|
||||
// failures must be carefully analyzed. Ben Hardekopf suggests (in
|
||||
// personal correspondence) some approaches to mitigating it:
|
||||
//
|
||||
// If there is a node with an HVN points-to set that is a superset
|
||||
// of the NORM points-to set, then either it's a bug or it's a
|
||||
// result of this issue. If it's a result of this issue, then in
|
||||
// the offline constraint graph there should be a REF node inside
|
||||
// some cycle that reaches this node, and in the NORM solution the
|
||||
// pointer being dereferenced by that REF node should be the empty
|
||||
// set. If that isn't true then this is a bug. If it is true, then
|
||||
// you can further check that in the NORM solution the "extra"
|
||||
// points-to info in the HVN solution does in fact come from that
|
||||
// purported cycle (if it doesn't, then this is still a bug). If
|
||||
// you're doing the further check then you'll need to do it for
|
||||
// each "extra" points-to element in the HVN points-to set.
|
||||
//
|
||||
// There are probably ways to optimize these checks by taking
|
||||
// advantage of graph properties. For example, extraneous points-to
|
||||
// info will flow through the graph and end up in many
|
||||
// nodes. Rather than checking every node with extra info, you
|
||||
// could probably work out the "origin point" of the extra info and
|
||||
// just check there. Note that the check in the first bullet is
|
||||
// looking for soundness bugs, while the check in the second bullet
|
||||
// is looking for precision bugs; depending on your needs, you may
|
||||
// care more about one than the other.
|
||||
//
|
||||
// which we should evaluate. The cross-check is nonetheless invaluable
|
||||
// for all but one of the programs in the pointer_test suite.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/types"
|
||||
"io"
|
||||
"reflect"
|
||||
|
||||
"golang.org/x/tools/container/intsets"
|
||||
)
|
||||
|
||||
// A peLabel is a pointer-equivalence label: two nodes with the same
// peLabel have identical points-to solutions.
//
// The numbers are allocated consecutively like so:
//	0		not a pointer
//	1..N-1		addrConstraints (equals the constraint's .src field, hence sparse)
//	...		offsetAddr constraints
//	...		SCCs (with indirect nodes or multiple inputs)
//
// Each PE label denotes a set of pointers containing a single addr, a
// single offsetAddr, or some set of other PE labels.
//
type peLabel int
|
||||
|
||||
// An hvn holds the working state of one Hash-Value Numbering pass
// over the constraints of a single analysis (see the file comment).
type hvn struct {
	a        *analysis
	N        int                // len(a.nodes) immediately after constraint generation
	log      io.Writer          // (optional) log of HVN lemmas
	onodes   []*onode           // nodes of the offline graph
	label    peLabel            // the next available PE label
	hvnLabel map[string]peLabel // hash-value numbering (PE label) for each set of onodeids
	stack    []onodeid          // DFS stack
	index    int32              // next onode.index, from Tarjan's SCC algorithm

	// For each distinct offsetAddrConstraint (src, offset) pair,
	// offsetAddrLabels records a unique PE label >= N.
	offsetAddrLabels map[offsetAddr]peLabel
}
|
||||
|
||||
// An onodeid is the index of a node in the offline graph.
// (Currently the first N align with the main nodes,
// but this may change with HRU.)
type onodeid uint32
|
||||
|
||||
// An onode is a node in the offline constraint graph.
// (Where ambiguous, members of analysis.nodes are referred to as
// "main graph" nodes.)
//
// Edges in the offline constraint graph (edges and implicit) point to
// the source, i.e. against the flow of values: they are dependencies.
// Implicit edges are used for SCC computation, but not for gathering
// incoming labels.
//
type onode struct {
	rep onodeid // index of representative of SCC in offline constraint graph

	edges    intsets.Sparse // constraint edges X-->Y (this onode is X)
	implicit intsets.Sparse // implicit edges *X-->*Y (this onode is X)
	peLabels intsets.Sparse // set of peLabels that are pointer-equivalent to this one
	indirect bool           // node has points-to relations not represented in graph

	// Tarjan's SCC algorithm
	index, lowlink int32 // Tarjan numbering
	scc            int32 // -ve => on stack; 0 => unvisited; +ve => node is root of a found SCC
}
|
||||
|
||||
// An offsetAddr identifies a distinct (base pointer, field offset)
// pair; it is the memoization key for hvn.offsetAddrLabels.
type offsetAddr struct {
	ptr    nodeid
	offset uint32
}
|
||||
|
||||
// nextLabel issues the next unused pointer-equivalence label.
|
||||
func (h *hvn) nextLabel() peLabel {
|
||||
h.label++
|
||||
return h.label
|
||||
}
|
||||
|
||||
// ref(X) returns the index of the onode for *X.
|
||||
func (h *hvn) ref(id onodeid) onodeid {
|
||||
return id + onodeid(len(h.a.nodes))
|
||||
}
|
||||
|
||||
// hvn computes pointer-equivalence labels (peLabels) using the Hash-based
// Value Numbering (HVN) algorithm described in Hardekopf & Lin, SAS'07.
//
// It builds the offline constraint graph, assigns PE labels by
// processing SCCs in topological order, and then simplifies the main
// constraint graph so that pointer-equivalent nodes are merged.
func (a *analysis) hvn() {
	start("HVN")

	if a.log != nil {
		fmt.Fprintf(a.log, "\n\n==== Pointer equivalence optimization\n\n")
	}

	h := hvn{
		a:                a,
		N:                len(a.nodes),
		log:              a.log,
		hvnLabel:         make(map[string]peLabel),
		offsetAddrLabels: make(map[offsetAddr]peLabel),
	}

	if h.log != nil {
		fmt.Fprintf(h.log, "\nCreating offline graph nodes...\n")
	}

	// Create offline nodes.  The first N nodes correspond to main
	// graph nodes; the next N are their corresponding ref() nodes.
	// ref() nodes are indirect by construction: their points-to
	// relations are not captured by the offline graph.
	h.onodes = make([]*onode, 2*h.N)
	for id := range a.nodes {
		id := onodeid(id)
		h.onodes[id] = &onode{}
		h.onodes[h.ref(id)] = &onode{indirect: true}
	}

	// Each node initially represents just itself.
	for id, o := range h.onodes {
		o.rep = onodeid(id)
	}

	h.markIndirectNodes()

	// Reserve the first N PE labels for addrConstraints.
	h.label = peLabel(h.N)

	// Add offline constraint edges.
	if h.log != nil {
		fmt.Fprintf(h.log, "\nAdding offline graph edges...\n")
	}
	for _, c := range a.constraints {
		if debugHVNVerbose && h.log != nil {
			fmt.Fprintf(h.log, "; %s\n", c)
		}
		c.presolve(&h)
	}

	// Find and collapse SCCs.
	if h.log != nil {
		fmt.Fprintf(h.log, "\nFinding SCCs...\n")
	}
	h.index = 1
	for id, o := range h.onodes {
		if id > 0 && o.index == 0 {
			// Start depth-first search at each unvisited node.
			h.visit(onodeid(id))
		}
	}

	// Dump the solution
	// (NB: somewhat redundant with logging from simplify().)
	if debugHVNVerbose && h.log != nil {
		fmt.Fprintf(h.log, "\nPointer equivalences:\n")
		for id, o := range h.onodes {
			if id == 0 {
				continue
			}
			if id == int(h.N) {
				// Separator between main nodes and ref() nodes.
				fmt.Fprintf(h.log, "---\n")
			}
			fmt.Fprintf(h.log, "o%d\t", id)
			if o.rep != onodeid(id) {
				fmt.Fprintf(h.log, "rep=o%d", o.rep)
			} else {
				fmt.Fprintf(h.log, "p%d", o.peLabels.Min())
				if o.indirect {
					fmt.Fprint(h.log, " indirect")
				}
			}
			fmt.Fprintln(h.log)
		}
	}

	// Simplify the main constraint graph
	h.simplify()

	a.showCounts()

	stop("HVN")
}
|
||||
|
||||
// ---- constraint-specific rules ----
|
||||
|
||||
// dst := &src
|
||||
func (c *addrConstraint) presolve(h *hvn) {
|
||||
// Each object (src) is an initial PE label.
|
||||
label := peLabel(c.src) // label < N
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
// duplicate log messages are possible
|
||||
fmt.Fprintf(h.log, "\tcreate p%d: {&n%d}\n", label, c.src)
|
||||
}
|
||||
odst := onodeid(c.dst)
|
||||
osrc := onodeid(c.src)
|
||||
|
||||
// Assign dst this label.
|
||||
h.onodes[odst].peLabels.Insert(int(label))
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "\to%d has p%d\n", odst, label)
|
||||
}
|
||||
|
||||
h.addImplicitEdge(h.ref(odst), osrc) // *dst ~~> src.
|
||||
}
|
||||
|
||||
// dst = src
|
||||
func (c *copyConstraint) presolve(h *hvn) {
|
||||
odst := onodeid(c.dst)
|
||||
osrc := onodeid(c.src)
|
||||
h.addEdge(odst, osrc) // dst --> src
|
||||
h.addImplicitEdge(h.ref(odst), h.ref(osrc)) // *dst ~~> *src
|
||||
}
|
||||
|
||||
// dst = *src + offset
|
||||
func (c *loadConstraint) presolve(h *hvn) {
|
||||
odst := onodeid(c.dst)
|
||||
osrc := onodeid(c.src)
|
||||
if c.offset == 0 {
|
||||
h.addEdge(odst, h.ref(osrc)) // dst --> *src
|
||||
} else {
|
||||
// We don't interpret load-with-offset, e.g. results
|
||||
// of map value lookup, R-block of dynamic call, slice
|
||||
// copy/append, reflection.
|
||||
h.markIndirect(odst, "load with offset")
|
||||
}
|
||||
}
|
||||
|
||||
// *dst + offset = src
|
||||
func (c *storeConstraint) presolve(h *hvn) {
|
||||
odst := onodeid(c.dst)
|
||||
osrc := onodeid(c.src)
|
||||
if c.offset == 0 {
|
||||
h.onodes[h.ref(odst)].edges.Insert(int(osrc)) // *dst --> src
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "\to%d --> o%d\n", h.ref(odst), osrc)
|
||||
}
|
||||
} else {
|
||||
// We don't interpret store-with-offset.
|
||||
// See discussion of soundness at markIndirectNodes.
|
||||
}
|
||||
}
|
||||
|
||||
// dst = &src.offset
|
||||
func (c *offsetAddrConstraint) presolve(h *hvn) {
|
||||
// Give each distinct (addr, offset) pair a fresh PE label.
|
||||
// The cache performs CSE, effectively.
|
||||
key := offsetAddr{c.src, c.offset}
|
||||
label, ok := h.offsetAddrLabels[key]
|
||||
if !ok {
|
||||
label = h.nextLabel()
|
||||
h.offsetAddrLabels[key] = label
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "\tcreate p%d: {&n%d.#%d}\n",
|
||||
label, c.src, c.offset)
|
||||
}
|
||||
}
|
||||
|
||||
// Assign dst this label.
|
||||
h.onodes[c.dst].peLabels.Insert(int(label))
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "\to%d has p%d\n", c.dst, label)
|
||||
}
|
||||
}
|
||||
|
||||
// dst = src.(typ) where typ is an interface
|
||||
func (c *typeFilterConstraint) presolve(h *hvn) {
|
||||
h.markIndirect(onodeid(c.dst), "typeFilter result")
|
||||
}
|
||||
|
||||
// dst = src.(typ) where typ is concrete
|
||||
func (c *untagConstraint) presolve(h *hvn) {
|
||||
odst := onodeid(c.dst)
|
||||
for end := odst + onodeid(h.a.sizeof(c.typ)); odst < end; odst++ {
|
||||
h.markIndirect(odst, "untag result")
|
||||
}
|
||||
}
|
||||
|
||||
// dst = src.method(c.params...)
|
||||
func (c *invokeConstraint) presolve(h *hvn) {
|
||||
// All methods are address-taken functions, so
|
||||
// their formal P-blocks were already marked indirect.
|
||||
|
||||
// Mark the caller's targets node as indirect.
|
||||
sig := c.method.Type().(*types.Signature)
|
||||
id := c.params
|
||||
h.markIndirect(onodeid(c.params), "invoke targets node")
|
||||
id++
|
||||
|
||||
id += nodeid(h.a.sizeof(sig.Params()))
|
||||
|
||||
// Mark the caller's R-block as indirect.
|
||||
end := id + nodeid(h.a.sizeof(sig.Results()))
|
||||
for id < end {
|
||||
h.markIndirect(onodeid(id), "invoke R-block")
|
||||
id++
|
||||
}
|
||||
}
|
||||
|
||||
// markIndirectNodes marks as indirect nodes whose points-to relations
|
||||
// are not entirely captured by the offline graph, including:
|
||||
//
|
||||
// (a) All address-taken nodes (including the following nodes within
|
||||
// the same object). This is described in the paper.
|
||||
//
|
||||
// The most subtle cause of indirect nodes is the generation of
|
||||
// store-with-offset constraints since the offline graph doesn't
|
||||
// represent them. A global audit of constraint generation reveals the
|
||||
// following uses of store-with-offset:
|
||||
//
|
||||
// (b) genDynamicCall, for P-blocks of dynamically called functions,
|
||||
// to which dynamic copy edges will be added during
|
||||
// solving: from storeConstraint for standalone functions,
|
||||
// and from invokeConstraint for methods.
|
||||
// All such P-blocks must be marked indirect.
|
||||
// (c) MapUpdate, to update the value part of a map object.
|
||||
// All MakeMap objects's value parts must be marked indirect.
|
||||
// (d) copyElems, to update the destination array.
|
||||
// All array elements must be marked indirect.
|
||||
//
|
||||
// Not all indirect marking happens here. ref() nodes are marked
|
||||
// indirect at construction, and each constraint's presolve() method may
|
||||
// mark additional nodes.
|
||||
//
|
||||
func (h *hvn) markIndirectNodes() {
|
||||
// (a) all address-taken nodes, plus all nodes following them
|
||||
// within the same object, since these may be indirectly
|
||||
// stored or address-taken.
|
||||
for _, c := range h.a.constraints {
|
||||
if c, ok := c.(*addrConstraint); ok {
|
||||
start := h.a.enclosingObj(c.src)
|
||||
end := start + nodeid(h.a.nodes[start].obj.size)
|
||||
for id := c.src; id < end; id++ {
|
||||
h.markIndirect(onodeid(id), "A-T object")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// (b) P-blocks of all address-taken functions.
|
||||
for id := 0; id < h.N; id++ {
|
||||
obj := h.a.nodes[id].obj
|
||||
|
||||
// TODO(adonovan): opt: if obj.cgn.fn is a method and
|
||||
// obj.cgn is not its shared contour, this is an
|
||||
// "inlined" static method call. We needn't consider it
|
||||
// address-taken since no invokeConstraint will affect it.
|
||||
|
||||
if obj != nil && obj.flags&otFunction != 0 && h.a.atFuncs[obj.cgn.fn] {
|
||||
// address-taken function
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "n%d is address-taken: %s\n", id, obj.cgn.fn)
|
||||
}
|
||||
h.markIndirect(onodeid(id), "A-T func identity")
|
||||
id++
|
||||
sig := obj.cgn.fn.Signature
|
||||
psize := h.a.sizeof(sig.Params())
|
||||
if sig.Recv() != nil {
|
||||
psize += h.a.sizeof(sig.Recv().Type())
|
||||
}
|
||||
for end := id + int(psize); id < end; id++ {
|
||||
h.markIndirect(onodeid(id), "A-T func P-block")
|
||||
}
|
||||
id--
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// (c) all map objects' value fields.
|
||||
for _, id := range h.a.mapValues {
|
||||
h.markIndirect(onodeid(id), "makemap.value")
|
||||
}
|
||||
|
||||
// (d) all array element objects.
|
||||
// TODO(adonovan): opt: can we do better?
|
||||
for id := 0; id < h.N; id++ {
|
||||
// Identity node for an object of array type?
|
||||
if tArray, ok := h.a.nodes[id].typ.(*types.Array); ok {
|
||||
// Mark the array element nodes indirect.
|
||||
// (Skip past the identity field.)
|
||||
for range h.a.flatten(tArray.Elem()) {
|
||||
id++
|
||||
h.markIndirect(onodeid(id), "array elem")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *hvn) markIndirect(oid onodeid, comment string) {
|
||||
h.onodes[oid].indirect = true
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "\to%d is indirect: %s\n", oid, comment)
|
||||
}
|
||||
}
|
||||
|
||||
// Adds an edge dst-->src.
|
||||
// Note the unusual convention: edges are dependency (contraflow) edges.
|
||||
func (h *hvn) addEdge(odst, osrc onodeid) {
|
||||
h.onodes[odst].edges.Insert(int(osrc))
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "\to%d --> o%d\n", odst, osrc)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *hvn) addImplicitEdge(odst, osrc onodeid) {
|
||||
h.onodes[odst].implicit.Insert(int(osrc))
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "\to%d ~~> o%d\n", odst, osrc)
|
||||
}
|
||||
}
|
||||
|
||||
// visit implements the depth-first search of Tarjan's SCC algorithm.
|
||||
// Precondition: x is canonical.
|
||||
func (h *hvn) visit(x onodeid) {
|
||||
h.checkCanonical(x)
|
||||
xo := h.onodes[x]
|
||||
xo.index = h.index
|
||||
xo.lowlink = h.index
|
||||
h.index++
|
||||
|
||||
h.stack = append(h.stack, x) // push
|
||||
assert(xo.scc == 0, "node revisited")
|
||||
xo.scc = -1
|
||||
|
||||
var deps []int
|
||||
deps = xo.edges.AppendTo(deps)
|
||||
deps = xo.implicit.AppendTo(deps)
|
||||
|
||||
for _, y := range deps {
|
||||
// Loop invariant: x is canonical.
|
||||
|
||||
y := h.find(onodeid(y))
|
||||
|
||||
if x == y {
|
||||
continue // nodes already coalesced
|
||||
}
|
||||
|
||||
xo := h.onodes[x]
|
||||
yo := h.onodes[y]
|
||||
|
||||
switch {
|
||||
case yo.scc > 0:
|
||||
// y is already a collapsed SCC
|
||||
|
||||
case yo.scc < 0:
|
||||
// y is on the stack, and thus in the current SCC.
|
||||
if yo.index < xo.lowlink {
|
||||
xo.lowlink = yo.index
|
||||
}
|
||||
|
||||
default:
|
||||
// y is unvisited; visit it now.
|
||||
h.visit(y)
|
||||
// Note: x and y are now non-canonical.
|
||||
|
||||
x = h.find(onodeid(x))
|
||||
|
||||
if yo.lowlink < xo.lowlink {
|
||||
xo.lowlink = yo.lowlink
|
||||
}
|
||||
}
|
||||
}
|
||||
h.checkCanonical(x)
|
||||
|
||||
// Is x the root of an SCC?
|
||||
if xo.lowlink == xo.index {
|
||||
// Coalesce all nodes in the SCC.
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "scc o%d\n", x)
|
||||
}
|
||||
for {
|
||||
// Pop y from stack.
|
||||
i := len(h.stack) - 1
|
||||
y := h.stack[i]
|
||||
h.stack = h.stack[:i]
|
||||
|
||||
h.checkCanonical(x)
|
||||
xo := h.onodes[x]
|
||||
h.checkCanonical(y)
|
||||
yo := h.onodes[y]
|
||||
|
||||
if xo == yo {
|
||||
// SCC is complete.
|
||||
xo.scc = 1
|
||||
h.labelSCC(x)
|
||||
break
|
||||
}
|
||||
h.coalesce(x, y)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Precondition: x is canonical.
|
||||
func (h *hvn) labelSCC(x onodeid) {
|
||||
h.checkCanonical(x)
|
||||
xo := h.onodes[x]
|
||||
xpe := &xo.peLabels
|
||||
|
||||
// All indirect nodes get new labels.
|
||||
if xo.indirect {
|
||||
label := h.nextLabel()
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "\tcreate p%d: indirect SCC\n", label)
|
||||
fmt.Fprintf(h.log, "\to%d has p%d\n", x, label)
|
||||
}
|
||||
|
||||
// Remove pre-labeling, in case a direct pre-labeled node was
|
||||
// merged with an indirect one.
|
||||
xpe.Clear()
|
||||
xpe.Insert(int(label))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Invariant: all peLabels sets are non-empty.
|
||||
// Those that are logically empty contain zero as their sole element.
|
||||
// No other sets contains zero.
|
||||
|
||||
// Find all labels coming in to the coalesced SCC node.
|
||||
for _, y := range xo.edges.AppendTo(nil) {
|
||||
y := h.find(onodeid(y))
|
||||
if y == x {
|
||||
continue // already coalesced
|
||||
}
|
||||
ype := &h.onodes[y].peLabels
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "\tedge from o%d = %s\n", y, ype)
|
||||
}
|
||||
|
||||
if ype.IsEmpty() {
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "\tnode has no PE label\n")
|
||||
}
|
||||
}
|
||||
assert(!ype.IsEmpty(), "incoming node has no PE label")
|
||||
|
||||
if ype.Has(0) {
|
||||
// {0} represents a non-pointer.
|
||||
assert(ype.Len() == 1, "PE set contains {0, ...}")
|
||||
} else {
|
||||
xpe.UnionWith(ype)
|
||||
}
|
||||
}
|
||||
|
||||
switch xpe.Len() {
|
||||
case 0:
|
||||
// SCC has no incoming non-zero PE labels: it is a non-pointer.
|
||||
xpe.Insert(0)
|
||||
|
||||
case 1:
|
||||
// already a singleton
|
||||
|
||||
default:
|
||||
// SCC has multiple incoming non-zero PE labels.
|
||||
// Find the canonical label representing this set.
|
||||
// We use String() as a fingerprint consistent with Equals().
|
||||
key := xpe.String()
|
||||
label, ok := h.hvnLabel[key]
|
||||
if !ok {
|
||||
label = h.nextLabel()
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "\tcreate p%d: union %s\n", label, xpe.String())
|
||||
}
|
||||
h.hvnLabel[key] = label
|
||||
}
|
||||
xpe.Clear()
|
||||
xpe.Insert(int(label))
|
||||
}
|
||||
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "\to%d has p%d\n", x, xpe.Min())
|
||||
}
|
||||
}
|
||||
|
||||
// coalesce combines two nodes in the offline constraint graph.
|
||||
// Precondition: x and y are canonical.
|
||||
func (h *hvn) coalesce(x, y onodeid) {
|
||||
xo := h.onodes[x]
|
||||
yo := h.onodes[y]
|
||||
|
||||
// x becomes y's canonical representative.
|
||||
yo.rep = x
|
||||
|
||||
if debugHVNVerbose && h.log != nil {
|
||||
fmt.Fprintf(h.log, "\tcoalesce o%d into o%d\n", y, x)
|
||||
}
|
||||
|
||||
// x accumulates y's edges.
|
||||
xo.edges.UnionWith(&yo.edges)
|
||||
yo.edges.Clear()
|
||||
|
||||
// x accumulates y's implicit edges.
|
||||
xo.implicit.UnionWith(&yo.implicit)
|
||||
yo.implicit.Clear()
|
||||
|
||||
// x accumulates y's pointer-equivalence labels.
|
||||
xo.peLabels.UnionWith(&yo.peLabels)
|
||||
yo.peLabels.Clear()
|
||||
|
||||
// x accumulates y's indirect flag.
|
||||
if yo.indirect {
|
||||
xo.indirect = true
|
||||
}
|
||||
}
|
||||
|
||||
// simplify computes a degenerate renumbering of nodeids from the PE
|
||||
// labels assigned by the hvn, and uses it to simplify the main
|
||||
// constraint graph, eliminating non-pointer nodes and duplicate
|
||||
// constraints.
|
||||
//
|
||||
func (h *hvn) simplify() {
|
||||
// canon maps each peLabel to its canonical main node.
|
||||
canon := make([]nodeid, h.label)
|
||||
for i := range canon {
|
||||
canon[i] = nodeid(h.N) // indicates "unset"
|
||||
}
|
||||
|
||||
// mapping maps each main node index to the index of the canonical node.
|
||||
mapping := make([]nodeid, len(h.a.nodes))
|
||||
|
||||
for id := range h.a.nodes {
|
||||
id := nodeid(id)
|
||||
if id == 0 {
|
||||
canon[0] = 0
|
||||
mapping[0] = 0
|
||||
continue
|
||||
}
|
||||
oid := h.find(onodeid(id))
|
||||
peLabels := &h.onodes[oid].peLabels
|
||||
assert(peLabels.Len() == 1, "PE class is not a singleton")
|
||||
label := peLabel(peLabels.Min())
|
||||
|
||||
canonId := canon[label]
|
||||
if canonId == nodeid(h.N) {
|
||||
// id becomes the representative of the PE label.
|
||||
canonId = id
|
||||
canon[label] = canonId
|
||||
|
||||
if h.a.log != nil {
|
||||
fmt.Fprintf(h.a.log, "\tpts(n%d) is canonical : \t(%s)\n",
|
||||
id, h.a.nodes[id].typ)
|
||||
}
|
||||
|
||||
} else {
|
||||
// Link the solver states for the two nodes.
|
||||
assert(h.a.nodes[canonId].solve != nil, "missing solver state")
|
||||
h.a.nodes[id].solve = h.a.nodes[canonId].solve
|
||||
|
||||
if h.a.log != nil {
|
||||
// TODO(adonovan): debug: reorganize the log so it prints
|
||||
// one line:
|
||||
// pe y = x1, ..., xn
|
||||
// for each canonical y. Requires allocation.
|
||||
fmt.Fprintf(h.a.log, "\tpts(n%d) = pts(n%d) : %s\n",
|
||||
id, canonId, h.a.nodes[id].typ)
|
||||
}
|
||||
}
|
||||
|
||||
mapping[id] = canonId
|
||||
}
|
||||
|
||||
// Renumber the constraints, eliminate duplicates, and eliminate
|
||||
// any containing non-pointers (n0).
|
||||
addrs := make(map[addrConstraint]bool)
|
||||
copys := make(map[copyConstraint]bool)
|
||||
loads := make(map[loadConstraint]bool)
|
||||
stores := make(map[storeConstraint]bool)
|
||||
offsetAddrs := make(map[offsetAddrConstraint]bool)
|
||||
untags := make(map[untagConstraint]bool)
|
||||
typeFilters := make(map[typeFilterConstraint]bool)
|
||||
invokes := make(map[invokeConstraint]bool)
|
||||
|
||||
nbefore := len(h.a.constraints)
|
||||
cc := h.a.constraints[:0] // in-situ compaction
|
||||
for _, c := range h.a.constraints {
|
||||
// Renumber.
|
||||
switch c := c.(type) {
|
||||
case *addrConstraint:
|
||||
// Don't renumber c.src since it is the label of
|
||||
// an addressable object and will appear in PT sets.
|
||||
c.dst = mapping[c.dst]
|
||||
default:
|
||||
c.renumber(mapping)
|
||||
}
|
||||
|
||||
if c.ptr() == 0 {
|
||||
continue // skip: constraint attached to non-pointer
|
||||
}
|
||||
|
||||
var dup bool
|
||||
switch c := c.(type) {
|
||||
case *addrConstraint:
|
||||
_, dup = addrs[*c]
|
||||
addrs[*c] = true
|
||||
|
||||
case *copyConstraint:
|
||||
if c.src == c.dst {
|
||||
continue // skip degenerate copies
|
||||
}
|
||||
if c.src == 0 {
|
||||
continue // skip copy from non-pointer
|
||||
}
|
||||
_, dup = copys[*c]
|
||||
copys[*c] = true
|
||||
|
||||
case *loadConstraint:
|
||||
if c.src == 0 {
|
||||
continue // skip load from non-pointer
|
||||
}
|
||||
_, dup = loads[*c]
|
||||
loads[*c] = true
|
||||
|
||||
case *storeConstraint:
|
||||
if c.src == 0 {
|
||||
continue // skip store from non-pointer
|
||||
}
|
||||
_, dup = stores[*c]
|
||||
stores[*c] = true
|
||||
|
||||
case *offsetAddrConstraint:
|
||||
if c.src == 0 {
|
||||
continue // skip offset from non-pointer
|
||||
}
|
||||
_, dup = offsetAddrs[*c]
|
||||
offsetAddrs[*c] = true
|
||||
|
||||
case *untagConstraint:
|
||||
if c.src == 0 {
|
||||
continue // skip untag of non-pointer
|
||||
}
|
||||
_, dup = untags[*c]
|
||||
untags[*c] = true
|
||||
|
||||
case *typeFilterConstraint:
|
||||
if c.src == 0 {
|
||||
continue // skip filter of non-pointer
|
||||
}
|
||||
_, dup = typeFilters[*c]
|
||||
typeFilters[*c] = true
|
||||
|
||||
case *invokeConstraint:
|
||||
if c.params == 0 {
|
||||
panic("non-pointer invoke.params")
|
||||
}
|
||||
if c.iface == 0 {
|
||||
continue // skip invoke on non-pointer
|
||||
}
|
||||
_, dup = invokes[*c]
|
||||
invokes[*c] = true
|
||||
|
||||
default:
|
||||
// We don't bother de-duping advanced constraints
|
||||
// (e.g. reflection) since they are uncommon.
|
||||
|
||||
// Eliminate constraints containing non-pointer nodeids.
|
||||
//
|
||||
// We use reflection to find the fields to avoid
|
||||
// adding yet another method to constraint.
|
||||
//
|
||||
// TODO(adonovan): experiment with a constraint
|
||||
// method that returns a slice of pointers to
|
||||
// nodeids fields to enable uniform iteration;
|
||||
// the renumber() method could be removed and
|
||||
// implemented using the new one.
|
||||
//
|
||||
// TODO(adonovan): opt: this is unsound since
|
||||
// some constraints still have an effect if one
|
||||
// of the operands is zero: rVCall, rVMapIndex,
|
||||
// rvSetMapIndex. Handle them specially.
|
||||
rtNodeid := reflect.TypeOf(nodeid(0))
|
||||
x := reflect.ValueOf(c).Elem()
|
||||
for i, nf := 0, x.NumField(); i < nf; i++ {
|
||||
f := x.Field(i)
|
||||
if f.Type() == rtNodeid {
|
||||
if f.Uint() == 0 {
|
||||
dup = true // skip it
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if dup {
|
||||
continue // skip duplicates
|
||||
}
|
||||
|
||||
cc = append(cc, c)
|
||||
}
|
||||
h.a.constraints = cc
|
||||
|
||||
if h.log != nil {
|
||||
fmt.Fprintf(h.log, "#constraints: was %d, now %d\n", nbefore, len(h.a.constraints))
|
||||
}
|
||||
}
|
||||
|
||||
// find returns the canonical onodeid for x.
|
||||
// (The onodes form a disjoint set forest.)
|
||||
func (h *hvn) find(x onodeid) onodeid {
|
||||
// TODO(adonovan): opt: this is a CPU hotspot. Try "union by rank".
|
||||
xo := h.onodes[x]
|
||||
rep := xo.rep
|
||||
if rep != x {
|
||||
rep = h.find(rep) // simple path compression
|
||||
xo.rep = rep
|
||||
}
|
||||
return rep
|
||||
}
|
||||
|
||||
func (h *hvn) checkCanonical(x onodeid) {
|
||||
if debugHVN {
|
||||
assert(x == h.find(x), "not canonical")
|
||||
}
|
||||
}
|
||||
|
||||
func assert(p bool, msg string) {
|
||||
if debugHVN && !p {
|
||||
panic("assertion failed: " + msg)
|
||||
}
|
||||
}
|
||||
361
vendor/golang.org/x/tools/go/pointer/intrinsics.go
generated
vendored
Normal file
361
vendor/golang.org/x/tools/go/pointer/intrinsics.go
generated
vendored
Normal file
@@ -0,0 +1,361 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pointer
|
||||
|
||||
// This package defines the treatment of intrinsics, i.e. library
|
||||
// functions requiring special analytical treatment.
|
||||
//
|
||||
// Most of these are C or assembly functions, but even some Go
|
||||
// functions require may special treatment if the analysis completely
|
||||
// replaces the implementation of an API such as reflection.
|
||||
|
||||
// TODO(adonovan): support a means of writing analytic summaries in
|
||||
// the target code, so that users can summarise the effects of their
|
||||
// own C functions using a snippet of Go.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/types"
|
||||
|
||||
"golang.org/x/tools/go/ssa"
|
||||
)
|
||||
|
||||
// Instances of 'intrinsic' generate analysis constraints for calls to
|
||||
// intrinsic functions.
|
||||
// Implementations may exploit information from the calling site
|
||||
// via cgn.callersite; for shared contours this is nil.
|
||||
type intrinsic func(a *analysis, cgn *cgnode)
|
||||
|
||||
// Initialized in explicit init() to defeat (spurious) initialization
|
||||
// cycle error.
|
||||
var intrinsicsByName = make(map[string]intrinsic)
|
||||
|
||||
func init() {
|
||||
// Key strings are from Function.String().
|
||||
// That little dot ۰ is an Arabic zero numeral (U+06F0),
|
||||
// categories [Nd].
|
||||
for name, fn := range map[string]intrinsic{
|
||||
// Other packages.
|
||||
"bytes.Equal": ext۰NoEffect,
|
||||
"bytes.IndexByte": ext۰NoEffect,
|
||||
"crypto/aes.decryptBlockAsm": ext۰NoEffect,
|
||||
"crypto/aes.encryptBlockAsm": ext۰NoEffect,
|
||||
"crypto/aes.expandKeyAsm": ext۰NoEffect,
|
||||
"crypto/aes.hasAsm": ext۰NoEffect,
|
||||
"crypto/md5.block": ext۰NoEffect,
|
||||
"crypto/rc4.xorKeyStream": ext۰NoEffect,
|
||||
"crypto/sha1.block": ext۰NoEffect,
|
||||
"crypto/sha256.block": ext۰NoEffect,
|
||||
"hash/crc32.castagnoliSSE42": ext۰NoEffect,
|
||||
"hash/crc32.haveSSE42": ext۰NoEffect,
|
||||
"math.Abs": ext۰NoEffect,
|
||||
"math.Acos": ext۰NoEffect,
|
||||
"math.Asin": ext۰NoEffect,
|
||||
"math.Atan": ext۰NoEffect,
|
||||
"math.Atan2": ext۰NoEffect,
|
||||
"math.Ceil": ext۰NoEffect,
|
||||
"math.Cos": ext۰NoEffect,
|
||||
"math.Dim": ext۰NoEffect,
|
||||
"math.Exp": ext۰NoEffect,
|
||||
"math.Exp2": ext۰NoEffect,
|
||||
"math.Expm1": ext۰NoEffect,
|
||||
"math.Float32bits": ext۰NoEffect,
|
||||
"math.Float32frombits": ext۰NoEffect,
|
||||
"math.Float64bits": ext۰NoEffect,
|
||||
"math.Float64frombits": ext۰NoEffect,
|
||||
"math.Floor": ext۰NoEffect,
|
||||
"math.Frexp": ext۰NoEffect,
|
||||
"math.Hypot": ext۰NoEffect,
|
||||
"math.Ldexp": ext۰NoEffect,
|
||||
"math.Log": ext۰NoEffect,
|
||||
"math.Log10": ext۰NoEffect,
|
||||
"math.Log1p": ext۰NoEffect,
|
||||
"math.Log2": ext۰NoEffect,
|
||||
"math.Max": ext۰NoEffect,
|
||||
"math.Min": ext۰NoEffect,
|
||||
"math.Mod": ext۰NoEffect,
|
||||
"math.Modf": ext۰NoEffect,
|
||||
"math.Remainder": ext۰NoEffect,
|
||||
"math.Sin": ext۰NoEffect,
|
||||
"math.Sincos": ext۰NoEffect,
|
||||
"math.Sqrt": ext۰NoEffect,
|
||||
"math.Tan": ext۰NoEffect,
|
||||
"math.Trunc": ext۰NoEffect,
|
||||
"math/big.addMulVVW": ext۰NoEffect,
|
||||
"math/big.addVV": ext۰NoEffect,
|
||||
"math/big.addVW": ext۰NoEffect,
|
||||
"math/big.bitLen": ext۰NoEffect,
|
||||
"math/big.divWVW": ext۰NoEffect,
|
||||
"math/big.divWW": ext۰NoEffect,
|
||||
"math/big.mulAddVWW": ext۰NoEffect,
|
||||
"math/big.mulWW": ext۰NoEffect,
|
||||
"math/big.shlVU": ext۰NoEffect,
|
||||
"math/big.shrVU": ext۰NoEffect,
|
||||
"math/big.subVV": ext۰NoEffect,
|
||||
"math/big.subVW": ext۰NoEffect,
|
||||
"net.runtime_Semacquire": ext۰NoEffect,
|
||||
"net.runtime_Semrelease": ext۰NoEffect,
|
||||
"net.runtime_pollClose": ext۰NoEffect,
|
||||
"net.runtime_pollOpen": ext۰NoEffect,
|
||||
"net.runtime_pollReset": ext۰NoEffect,
|
||||
"net.runtime_pollServerInit": ext۰NoEffect,
|
||||
"net.runtime_pollSetDeadline": ext۰NoEffect,
|
||||
"net.runtime_pollUnblock": ext۰NoEffect,
|
||||
"net.runtime_pollWait": ext۰NoEffect,
|
||||
"net.runtime_pollWaitCanceled": ext۰NoEffect,
|
||||
"os.epipecheck": ext۰NoEffect,
|
||||
// All other runtime functions are treated as NoEffect.
|
||||
"runtime.SetFinalizer": ext۰runtime۰SetFinalizer,
|
||||
"strings.IndexByte": ext۰NoEffect,
|
||||
"sync.runtime_Semacquire": ext۰NoEffect,
|
||||
"sync.runtime_Semrelease": ext۰NoEffect,
|
||||
"sync.runtime_Syncsemacquire": ext۰NoEffect,
|
||||
"sync.runtime_Syncsemcheck": ext۰NoEffect,
|
||||
"sync.runtime_Syncsemrelease": ext۰NoEffect,
|
||||
"sync.runtime_procPin": ext۰NoEffect,
|
||||
"sync.runtime_procUnpin": ext۰NoEffect,
|
||||
"sync.runtime_registerPool": ext۰NoEffect,
|
||||
"sync/atomic.AddInt32": ext۰NoEffect,
|
||||
"sync/atomic.AddInt64": ext۰NoEffect,
|
||||
"sync/atomic.AddUint32": ext۰NoEffect,
|
||||
"sync/atomic.AddUint64": ext۰NoEffect,
|
||||
"sync/atomic.AddUintptr": ext۰NoEffect,
|
||||
"sync/atomic.CompareAndSwapInt32": ext۰NoEffect,
|
||||
"sync/atomic.CompareAndSwapUint32": ext۰NoEffect,
|
||||
"sync/atomic.CompareAndSwapUint64": ext۰NoEffect,
|
||||
"sync/atomic.CompareAndSwapUintptr": ext۰NoEffect,
|
||||
"sync/atomic.LoadInt32": ext۰NoEffect,
|
||||
"sync/atomic.LoadInt64": ext۰NoEffect,
|
||||
"sync/atomic.LoadPointer": ext۰NoEffect, // ignore unsafe.Pointers
|
||||
"sync/atomic.LoadUint32": ext۰NoEffect,
|
||||
"sync/atomic.LoadUint64": ext۰NoEffect,
|
||||
"sync/atomic.LoadUintptr": ext۰NoEffect,
|
||||
"sync/atomic.StoreInt32": ext۰NoEffect,
|
||||
"sync/atomic.StorePointer": ext۰NoEffect, // ignore unsafe.Pointers
|
||||
"sync/atomic.StoreUint32": ext۰NoEffect,
|
||||
"sync/atomic.StoreUintptr": ext۰NoEffect,
|
||||
"syscall.Close": ext۰NoEffect,
|
||||
"syscall.Exit": ext۰NoEffect,
|
||||
"syscall.Getpid": ext۰NoEffect,
|
||||
"syscall.Getwd": ext۰NoEffect,
|
||||
"syscall.Kill": ext۰NoEffect,
|
||||
"syscall.RawSyscall": ext۰NoEffect,
|
||||
"syscall.RawSyscall6": ext۰NoEffect,
|
||||
"syscall.Syscall": ext۰NoEffect,
|
||||
"syscall.Syscall6": ext۰NoEffect,
|
||||
"syscall.runtime_AfterFork": ext۰NoEffect,
|
||||
"syscall.runtime_BeforeFork": ext۰NoEffect,
|
||||
"syscall.setenv_c": ext۰NoEffect,
|
||||
"time.Sleep": ext۰NoEffect,
|
||||
"time.now": ext۰NoEffect,
|
||||
"time.startTimer": ext۰time۰startTimer,
|
||||
"time.stopTimer": ext۰NoEffect,
|
||||
} {
|
||||
intrinsicsByName[name] = fn
|
||||
}
|
||||
}
|
||||
|
||||
// findIntrinsic returns the constraint generation function for an
|
||||
// intrinsic function fn, or nil if the function should be handled normally.
|
||||
//
|
||||
func (a *analysis) findIntrinsic(fn *ssa.Function) intrinsic {
|
||||
// Consult the *Function-keyed cache.
|
||||
// A cached nil indicates a normal non-intrinsic function.
|
||||
impl, ok := a.intrinsics[fn]
|
||||
if !ok {
|
||||
impl = intrinsicsByName[fn.String()] // may be nil
|
||||
|
||||
if a.isReflect(fn) {
|
||||
if !a.config.Reflection {
|
||||
impl = ext۰NoEffect // reflection disabled
|
||||
} else if impl == nil {
|
||||
// Ensure all "reflect" code is treated intrinsically.
|
||||
impl = ext۰NotYetImplemented
|
||||
}
|
||||
} else if impl == nil && fn.Pkg != nil && fn.Pkg.Pkg.Path() == "runtime" {
|
||||
// Ignore "runtime" (except SetFinalizer):
|
||||
// it has few interesting effects on aliasing
|
||||
// and is full of unsafe code we can't analyze.
|
||||
impl = ext۰NoEffect
|
||||
}
|
||||
|
||||
a.intrinsics[fn] = impl
|
||||
}
|
||||
return impl
|
||||
}
|
||||
|
||||
// isReflect reports whether fn belongs to the "reflect" package.
|
||||
func (a *analysis) isReflect(fn *ssa.Function) bool {
|
||||
if a.reflectValueObj == nil {
|
||||
return false // "reflect" package not loaded
|
||||
}
|
||||
reflectPackage := a.reflectValueObj.Pkg()
|
||||
if fn.Pkg != nil && fn.Pkg.Pkg == reflectPackage {
|
||||
return true
|
||||
}
|
||||
// Synthetic wrappers have a nil Pkg, so they slip through the
|
||||
// previous check. Check the receiver package.
|
||||
// TODO(adonovan): should synthetic wrappers have a non-nil Pkg?
|
||||
if recv := fn.Signature.Recv(); recv != nil {
|
||||
if named, ok := deref(recv.Type()).(*types.Named); ok {
|
||||
if named.Obj().Pkg() == reflectPackage {
|
||||
return true // e.g. wrapper of (reflect.Value).f
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// A trivial intrinsic suitable for any function that does not:
|
||||
// 1) induce aliases between its arguments or any global variables;
|
||||
// 2) call any functions; or
|
||||
// 3) create any labels.
|
||||
//
|
||||
// Many intrinsics (such as CompareAndSwapInt32) have a fourth kind of
|
||||
// effect: loading or storing through a pointer. Though these could
|
||||
// be significant, we deliberately ignore them because they are
|
||||
// generally not worth the effort.
|
||||
//
|
||||
// We sometimes violate condition #3 if the function creates only
|
||||
// non-function labels, as the control-flow graph is still sound.
|
||||
//
|
||||
func ext۰NoEffect(a *analysis, cgn *cgnode) {}
|
||||
|
||||
func ext۰NotYetImplemented(a *analysis, cgn *cgnode) {
|
||||
fn := cgn.fn
|
||||
a.warnf(fn.Pos(), "unsound: intrinsic treatment of %s not yet implemented", fn)
|
||||
}
|
||||
|
||||
// ---------- func runtime.SetFinalizer(x, f interface{}) ----------
|
||||
|
||||
// runtime.SetFinalizer(x, f)
|
||||
type runtimeSetFinalizerConstraint struct {
|
||||
targets nodeid // (indirect)
|
||||
f nodeid // (ptr)
|
||||
x nodeid
|
||||
}
|
||||
|
||||
func (c *runtimeSetFinalizerConstraint) ptr() nodeid { return c.f }
|
||||
func (c *runtimeSetFinalizerConstraint) presolve(h *hvn) {
|
||||
h.markIndirect(onodeid(c.targets), "SetFinalizer.targets")
|
||||
}
|
||||
func (c *runtimeSetFinalizerConstraint) renumber(mapping []nodeid) {
|
||||
c.targets = mapping[c.targets]
|
||||
c.f = mapping[c.f]
|
||||
c.x = mapping[c.x]
|
||||
}
|
||||
|
||||
func (c *runtimeSetFinalizerConstraint) String() string {
|
||||
return fmt.Sprintf("runtime.SetFinalizer(n%d, n%d)", c.x, c.f)
|
||||
}
|
||||
|
||||
func (c *runtimeSetFinalizerConstraint) solve(a *analysis, delta *nodeset) {
|
||||
for _, fObj := range delta.AppendTo(a.deltaSpace) {
|
||||
tDyn, f, indirect := a.taggedValue(nodeid(fObj))
|
||||
if indirect {
|
||||
// TODO(adonovan): we'll need to implement this
|
||||
// when we start creating indirect tagged objects.
|
||||
panic("indirect tagged object")
|
||||
}
|
||||
|
||||
tSig, ok := tDyn.Underlying().(*types.Signature)
|
||||
if !ok {
|
||||
continue // not a function
|
||||
}
|
||||
if tSig.Recv() != nil {
|
||||
panic(tSig)
|
||||
}
|
||||
if tSig.Params().Len() != 1 {
|
||||
continue // not a unary function
|
||||
}
|
||||
|
||||
// Extract x to tmp.
|
||||
tx := tSig.Params().At(0).Type()
|
||||
tmp := a.addNodes(tx, "SetFinalizer.tmp")
|
||||
a.typeAssert(tx, tmp, c.x, false)
|
||||
|
||||
// Call f(tmp).
|
||||
a.store(f, tmp, 1, a.sizeof(tx))
|
||||
|
||||
// Add dynamic call target.
|
||||
if a.onlineCopy(c.targets, f) {
|
||||
a.addWork(c.targets)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ext۰runtime۰SetFinalizer(a *analysis, cgn *cgnode) {
|
||||
// This is the shared contour, used for dynamic calls.
|
||||
targets := a.addOneNode(tInvalid, "SetFinalizer.targets", nil)
|
||||
cgn.sites = append(cgn.sites, &callsite{targets: targets})
|
||||
params := a.funcParams(cgn.obj)
|
||||
a.addConstraint(&runtimeSetFinalizerConstraint{
|
||||
targets: targets,
|
||||
x: params,
|
||||
f: params + 1,
|
||||
})
|
||||
}
|
||||
|
||||
// ---------- func time.startTimer(t *runtimeTimer) ----------
|
||||
|
||||
// time.StartTimer(t)
|
||||
type timeStartTimerConstraint struct {
|
||||
targets nodeid // (indirect)
|
||||
t nodeid // (ptr)
|
||||
}
|
||||
|
||||
func (c *timeStartTimerConstraint) ptr() nodeid { return c.t }
|
||||
func (c *timeStartTimerConstraint) presolve(h *hvn) {
|
||||
h.markIndirect(onodeid(c.targets), "StartTimer.targets")
|
||||
}
|
||||
func (c *timeStartTimerConstraint) renumber(mapping []nodeid) {
|
||||
c.targets = mapping[c.targets]
|
||||
c.t = mapping[c.t]
|
||||
}
|
||||
|
||||
func (c *timeStartTimerConstraint) String() string {
|
||||
return fmt.Sprintf("time.startTimer(n%d)", c.t)
|
||||
}
|
||||
|
||||
func (c *timeStartTimerConstraint) solve(a *analysis, delta *nodeset) {
|
||||
for _, tObj := range delta.AppendTo(a.deltaSpace) {
|
||||
t := nodeid(tObj)
|
||||
|
||||
// We model startTimer as if it was defined thus:
|
||||
// func startTimer(t *runtimeTimer) { t.f(t.arg) }
|
||||
|
||||
// We hard-code the field offsets of time.runtimeTimer:
|
||||
// type runtimeTimer struct {
|
||||
// 0 __identity__
|
||||
// 1 i int32
|
||||
// 2 when int64
|
||||
// 3 period int64
|
||||
// 4 f func(int64, interface{})
|
||||
// 5 arg interface{}
|
||||
// }
|
||||
f := t + 4
|
||||
arg := t + 5
|
||||
|
||||
// store t.arg to t.f.params[0]
|
||||
// (offset 1 => skip identity)
|
||||
a.store(f, arg, 1, 1)
|
||||
|
||||
// Add dynamic call target.
|
||||
if a.onlineCopy(c.targets, f) {
|
||||
a.addWork(c.targets)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ext۰time۰startTimer(a *analysis, cgn *cgnode) {
|
||||
// This is the shared contour, used for dynamic calls.
|
||||
targets := a.addOneNode(tInvalid, "startTimer.targets", nil)
|
||||
cgn.sites = append(cgn.sites, &callsite{targets: targets})
|
||||
params := a.funcParams(cgn.obj)
|
||||
a.addConstraint(&timeStartTimerConstraint{
|
||||
targets: targets,
|
||||
t: params,
|
||||
})
|
||||
}
|
||||
152
vendor/golang.org/x/tools/go/pointer/labels.go
generated
vendored
Normal file
152
vendor/golang.org/x/tools/go/pointer/labels.go
generated
vendored
Normal file
@@ -0,0 +1,152 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pointer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/ssa"
|
||||
)
|
||||
|
||||
// A Label is an entity that may be pointed to by a pointer, map,
|
||||
// channel, 'func', slice or interface.
|
||||
//
|
||||
// Labels include:
|
||||
// - functions
|
||||
// - globals
|
||||
// - tagged objects, representing interfaces and reflect.Values
|
||||
// - arrays created by conversions (e.g. []byte("foo"), []byte(s))
|
||||
// - stack- and heap-allocated variables (including composite literals)
|
||||
// - channels, maps and arrays created by make()
|
||||
// - intrinsic or reflective operations that allocate (e.g. append, reflect.New)
|
||||
// - intrinsic objects, e.g. the initial array behind os.Args.
|
||||
// - and their subelements, e.g. "alloc.y[*].z"
|
||||
//
|
||||
// Labels are so varied that they defy good generalizations;
|
||||
// some have no value, no callgraph node, or no position.
|
||||
// Many objects have types that are inexpressible in Go:
|
||||
// maps, channels, functions, tagged objects.
|
||||
//
|
||||
// At most one of Value() or ReflectType() may return non-nil.
|
||||
//
|
||||
type Label struct {
|
||||
obj *object // the addressable memory location containing this label
|
||||
subelement *fieldInfo // subelement path within obj, e.g. ".a.b[*].c"
|
||||
}
|
||||
|
||||
// Value returns the ssa.Value that allocated this label's object, if any.
|
||||
func (l Label) Value() ssa.Value {
|
||||
val, _ := l.obj.data.(ssa.Value)
|
||||
return val
|
||||
}
|
||||
|
||||
// ReflectType returns the type represented by this label if it is an
|
||||
// reflect.rtype instance object or *reflect.rtype-tagged object.
|
||||
//
|
||||
func (l Label) ReflectType() types.Type {
|
||||
rtype, _ := l.obj.data.(types.Type)
|
||||
return rtype
|
||||
}
|
||||
|
||||
// Path returns the path to the subelement of the object containing
|
||||
// this label. For example, ".x[*].y".
|
||||
//
|
||||
func (l Label) Path() string {
|
||||
return l.subelement.path()
|
||||
}
|
||||
|
||||
// Pos returns the position of this label, if known, zero otherwise.
|
||||
func (l Label) Pos() token.Pos {
|
||||
switch data := l.obj.data.(type) {
|
||||
case ssa.Value:
|
||||
return data.Pos()
|
||||
case types.Type:
|
||||
if nt, ok := deref(data).(*types.Named); ok {
|
||||
return nt.Obj().Pos()
|
||||
}
|
||||
}
|
||||
if cgn := l.obj.cgn; cgn != nil {
|
||||
return cgn.fn.Pos()
|
||||
}
|
||||
return token.NoPos
|
||||
}
|
||||
|
||||
// String returns the printed form of this label.
|
||||
//
|
||||
// Examples: Object type:
|
||||
// x (a variable)
|
||||
// (sync.Mutex).Lock (a function)
|
||||
// convert (array created by conversion)
|
||||
// makemap (map allocated via make)
|
||||
// makechan (channel allocated via make)
|
||||
// makeinterface (tagged object allocated by makeinterface)
|
||||
// <alloc in reflect.Zero> (allocation in instrinsic)
|
||||
// sync.Mutex (a reflect.rtype instance)
|
||||
// <command-line arguments> (an intrinsic object)
|
||||
//
|
||||
// Labels within compound objects have subelement paths:
|
||||
// x.y[*].z (a struct variable, x)
|
||||
// append.y[*].z (array allocated by append)
|
||||
// makeslice.y[*].z (array allocated via make)
|
||||
//
|
||||
// TODO(adonovan): expose func LabelString(*types.Package, Label).
|
||||
//
|
||||
func (l Label) String() string {
|
||||
var s string
|
||||
switch v := l.obj.data.(type) {
|
||||
case types.Type:
|
||||
return v.String()
|
||||
|
||||
case string:
|
||||
s = v // an intrinsic object (e.g. os.Args[*])
|
||||
|
||||
case nil:
|
||||
if l.obj.cgn != nil {
|
||||
// allocation by intrinsic or reflective operation
|
||||
s = fmt.Sprintf("<alloc in %s>", l.obj.cgn.fn)
|
||||
} else {
|
||||
s = "<unknown>" // should be unreachable
|
||||
}
|
||||
|
||||
case *ssa.Function:
|
||||
s = v.String()
|
||||
|
||||
case *ssa.Global:
|
||||
s = v.String()
|
||||
|
||||
case *ssa.Const:
|
||||
s = v.Name()
|
||||
|
||||
case *ssa.Alloc:
|
||||
s = v.Comment
|
||||
if s == "" {
|
||||
s = "alloc"
|
||||
}
|
||||
|
||||
case *ssa.Call:
|
||||
// Currently only calls to append can allocate objects.
|
||||
if v.Call.Value.(*ssa.Builtin).Object().Name() != "append" {
|
||||
panic("unhandled *ssa.Call label: " + v.Name())
|
||||
}
|
||||
s = "append"
|
||||
|
||||
case *ssa.MakeMap, *ssa.MakeChan, *ssa.MakeSlice, *ssa.Convert:
|
||||
s = strings.ToLower(strings.TrimPrefix(fmt.Sprintf("%T", v), "*ssa."))
|
||||
|
||||
case *ssa.MakeInterface:
|
||||
// MakeInterface is usually implicit in Go source (so
|
||||
// Pos()==0), and tagged objects may be allocated
|
||||
// synthetically (so no *MakeInterface data).
|
||||
s = "makeinterface:" + v.X.Type().String()
|
||||
|
||||
default:
|
||||
panic(fmt.Sprintf("unhandled object data type: %T", v))
|
||||
}
|
||||
|
||||
return s + l.subelement.path()
|
||||
}
|
||||
132
vendor/golang.org/x/tools/go/pointer/opt.go
generated
vendored
Normal file
132
vendor/golang.org/x/tools/go/pointer/opt.go
generated
vendored
Normal file
@@ -0,0 +1,132 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pointer
|
||||
|
||||
// This file implements renumbering, a pre-solver optimization to
|
||||
// improve the efficiency of the solver's points-to set representation.
|
||||
//
|
||||
// TODO(adonovan): rename file "renumber.go"
|
||||
|
||||
import "fmt"
|
||||
|
||||
// renumber permutes a.nodes so that all nodes within an addressable
|
||||
// object appear before all non-addressable nodes, maintaining the
|
||||
// order of nodes within the same object (as required by offsetAddr).
|
||||
//
|
||||
// renumber must update every nodeid in the analysis (constraints,
|
||||
// Pointers, callgraph, etc) to reflect the new ordering.
|
||||
//
|
||||
// This is an optimisation to increase the locality and efficiency of
|
||||
// sparse representations of points-to sets. (Typically only about
|
||||
// 20% of nodes are within an object.)
|
||||
//
|
||||
// NB: nodes added during solving (e.g. for reflection, SetFinalizer)
|
||||
// will be appended to the end.
|
||||
//
|
||||
// Renumbering makes the PTA log inscrutable. To aid debugging, later
|
||||
// phases (e.g. HVN) must not rely on it having occurred.
|
||||
//
|
||||
func (a *analysis) renumber() {
|
||||
if a.log != nil {
|
||||
fmt.Fprintf(a.log, "\n\n==== Renumbering\n\n")
|
||||
}
|
||||
|
||||
N := nodeid(len(a.nodes))
|
||||
newNodes := make([]*node, N, N)
|
||||
renumbering := make([]nodeid, N, N) // maps old to new
|
||||
|
||||
var i, j nodeid
|
||||
|
||||
// The zero node is special.
|
||||
newNodes[j] = a.nodes[i]
|
||||
renumbering[i] = j
|
||||
i++
|
||||
j++
|
||||
|
||||
// Pass 1: object nodes.
|
||||
for i < N {
|
||||
obj := a.nodes[i].obj
|
||||
if obj == nil {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
end := i + nodeid(obj.size)
|
||||
for i < end {
|
||||
newNodes[j] = a.nodes[i]
|
||||
renumbering[i] = j
|
||||
i++
|
||||
j++
|
||||
}
|
||||
}
|
||||
nobj := j
|
||||
|
||||
// Pass 2: non-object nodes.
|
||||
for i = 1; i < N; {
|
||||
obj := a.nodes[i].obj
|
||||
if obj != nil {
|
||||
i += nodeid(obj.size)
|
||||
continue
|
||||
}
|
||||
|
||||
newNodes[j] = a.nodes[i]
|
||||
renumbering[i] = j
|
||||
i++
|
||||
j++
|
||||
}
|
||||
|
||||
if j != N {
|
||||
panic(fmt.Sprintf("internal error: j=%d, N=%d", j, N))
|
||||
}
|
||||
|
||||
// Log the remapping table.
|
||||
if a.log != nil {
|
||||
fmt.Fprintf(a.log, "Renumbering nodes to improve density:\n")
|
||||
fmt.Fprintf(a.log, "(%d object nodes of %d total)\n", nobj, N)
|
||||
for old, new := range renumbering {
|
||||
fmt.Fprintf(a.log, "\tn%d -> n%d\n", old, new)
|
||||
}
|
||||
}
|
||||
|
||||
// Now renumber all existing nodeids to use the new node permutation.
|
||||
// It is critical that all reachable nodeids are accounted for!
|
||||
|
||||
// Renumber nodeids in queried Pointers.
|
||||
for v, ptr := range a.result.Queries {
|
||||
ptr.n = renumbering[ptr.n]
|
||||
a.result.Queries[v] = ptr
|
||||
}
|
||||
for v, ptr := range a.result.IndirectQueries {
|
||||
ptr.n = renumbering[ptr.n]
|
||||
a.result.IndirectQueries[v] = ptr
|
||||
}
|
||||
for _, queries := range a.config.extendedQueries {
|
||||
for _, query := range queries {
|
||||
if query.ptr != nil {
|
||||
query.ptr.n = renumbering[query.ptr.n]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Renumber nodeids in global objects.
|
||||
for v, id := range a.globalobj {
|
||||
a.globalobj[v] = renumbering[id]
|
||||
}
|
||||
|
||||
// Renumber nodeids in constraints.
|
||||
for _, c := range a.constraints {
|
||||
c.renumber(renumbering)
|
||||
}
|
||||
|
||||
// Renumber nodeids in the call graph.
|
||||
for _, cgn := range a.cgnodes {
|
||||
cgn.obj = renumbering[cgn.obj]
|
||||
for _, site := range cgn.sites {
|
||||
site.targets = renumbering[site.targets]
|
||||
}
|
||||
}
|
||||
|
||||
a.nodes = newNodes
|
||||
}
|
||||
43
vendor/golang.org/x/tools/go/pointer/print.go
generated
vendored
Normal file
43
vendor/golang.org/x/tools/go/pointer/print.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pointer
|
||||
|
||||
import "fmt"
|
||||
|
||||
func (c *addrConstraint) String() string {
|
||||
return fmt.Sprintf("addr n%d <- {&n%d}", c.dst, c.src)
|
||||
}
|
||||
|
||||
func (c *copyConstraint) String() string {
|
||||
return fmt.Sprintf("copy n%d <- n%d", c.dst, c.src)
|
||||
}
|
||||
|
||||
func (c *loadConstraint) String() string {
|
||||
return fmt.Sprintf("load n%d <- n%d[%d]", c.dst, c.src, c.offset)
|
||||
}
|
||||
|
||||
func (c *storeConstraint) String() string {
|
||||
return fmt.Sprintf("store n%d[%d] <- n%d", c.dst, c.offset, c.src)
|
||||
}
|
||||
|
||||
func (c *offsetAddrConstraint) String() string {
|
||||
return fmt.Sprintf("offsetAddr n%d <- n%d.#%d", c.dst, c.src, c.offset)
|
||||
}
|
||||
|
||||
func (c *typeFilterConstraint) String() string {
|
||||
return fmt.Sprintf("typeFilter n%d <- n%d.(%s)", c.dst, c.src, c.typ)
|
||||
}
|
||||
|
||||
func (c *untagConstraint) String() string {
|
||||
return fmt.Sprintf("untag n%d <- n%d.(%s)", c.dst, c.src, c.typ)
|
||||
}
|
||||
|
||||
func (c *invokeConstraint) String() string {
|
||||
return fmt.Sprintf("invoke n%d.%s(n%d ...)", c.iface, c.method.Name(), c.params)
|
||||
}
|
||||
|
||||
func (n nodeid) String() string {
|
||||
return fmt.Sprintf("n%d", n)
|
||||
}
|
||||
221
vendor/golang.org/x/tools/go/pointer/query.go
generated
vendored
Normal file
221
vendor/golang.org/x/tools/go/pointer/query.go
generated
vendored
Normal file
@@ -0,0 +1,221 @@
|
||||
package pointer
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// An extendedQuery represents a sequence of destructuring operations
|
||||
// applied to an ssa.Value (denoted by "x").
|
||||
type extendedQuery struct {
|
||||
ops []interface{}
|
||||
ptr *Pointer
|
||||
}
|
||||
|
||||
// indexValue returns the value of an integer literal used as an
|
||||
// index.
|
||||
func indexValue(expr ast.Expr) (int, error) {
|
||||
lit, ok := expr.(*ast.BasicLit)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("non-integer index (%T)", expr)
|
||||
}
|
||||
if lit.Kind != token.INT {
|
||||
return 0, fmt.Errorf("non-integer index %s", lit.Value)
|
||||
}
|
||||
return strconv.Atoi(lit.Value)
|
||||
}
|
||||
|
||||
// parseExtendedQuery parses and validates a destructuring Go
|
||||
// expression and returns the sequence of destructuring operations.
|
||||
// See parseDestructuringExpr for details.
|
||||
func parseExtendedQuery(typ types.Type, query string) ([]interface{}, types.Type, error) {
|
||||
expr, err := parser.ParseExpr(query)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ops, typ, err := destructuringOps(typ, expr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(ops) == 0 {
|
||||
return nil, nil, errors.New("invalid query: must not be empty")
|
||||
}
|
||||
if ops[0] != "x" {
|
||||
return nil, nil, fmt.Errorf("invalid query: query operand must be named x")
|
||||
}
|
||||
if !CanPoint(typ) {
|
||||
return nil, nil, fmt.Errorf("query does not describe a pointer-like value: %s", typ)
|
||||
}
|
||||
return ops, typ, nil
|
||||
}
|
||||
|
||||
// destructuringOps parses a Go expression consisting only of an
|
||||
// identifier "x", field selections, indexing, channel receives, load
|
||||
// operations and parens---for example: "<-(*x[i])[key]"--- and
|
||||
// returns the sequence of destructuring operations on x.
|
||||
func destructuringOps(typ types.Type, expr ast.Expr) ([]interface{}, types.Type, error) {
|
||||
switch expr := expr.(type) {
|
||||
case *ast.SelectorExpr:
|
||||
out, typ, err := destructuringOps(typ, expr.X)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var structT *types.Struct
|
||||
switch typ := typ.Underlying().(type) {
|
||||
case *types.Pointer:
|
||||
var ok bool
|
||||
structT, ok = typ.Elem().Underlying().(*types.Struct)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("cannot access field %s of pointer to type %s", expr.Sel.Name, typ.Elem())
|
||||
}
|
||||
|
||||
out = append(out, "load")
|
||||
case *types.Struct:
|
||||
structT = typ
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("cannot access field %s of type %s", expr.Sel.Name, typ)
|
||||
}
|
||||
|
||||
for i := 0; i < structT.NumFields(); i++ {
|
||||
field := structT.Field(i)
|
||||
if field.Name() == expr.Sel.Name {
|
||||
out = append(out, "field", i)
|
||||
return out, field.Type().Underlying(), nil
|
||||
}
|
||||
}
|
||||
// TODO(dh): supporting embedding would need something like
|
||||
// types.LookupFieldOrMethod, but without taking package
|
||||
// boundaries into account, because we may want to access
|
||||
// unexported fields. If we were only interested in one level
|
||||
// of unexported name, we could determine the appropriate
|
||||
// package and run LookupFieldOrMethod with that. However, a
|
||||
// single query may want to cross multiple package boundaries,
|
||||
// and at this point it's not really worth the complexity.
|
||||
return nil, nil, fmt.Errorf("no field %s in %s (embedded fields must be resolved manually)", expr.Sel.Name, structT)
|
||||
case *ast.Ident:
|
||||
return []interface{}{expr.Name}, typ, nil
|
||||
case *ast.BasicLit:
|
||||
return []interface{}{expr.Value}, nil, nil
|
||||
case *ast.IndexExpr:
|
||||
out, typ, err := destructuringOps(typ, expr.X)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
switch typ := typ.Underlying().(type) {
|
||||
case *types.Array:
|
||||
out = append(out, "arrayelem")
|
||||
return out, typ.Elem().Underlying(), nil
|
||||
case *types.Slice:
|
||||
out = append(out, "sliceelem")
|
||||
return out, typ.Elem().Underlying(), nil
|
||||
case *types.Map:
|
||||
out = append(out, "mapelem")
|
||||
return out, typ.Elem().Underlying(), nil
|
||||
case *types.Tuple:
|
||||
out = append(out, "index")
|
||||
idx, err := indexValue(expr.Index)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
out = append(out, idx)
|
||||
if idx >= typ.Len() || idx < 0 {
|
||||
return nil, nil, fmt.Errorf("tuple index %d out of bounds", idx)
|
||||
}
|
||||
return out, typ.At(idx).Type().Underlying(), nil
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("cannot index type %s", typ)
|
||||
}
|
||||
|
||||
case *ast.UnaryExpr:
|
||||
if expr.Op != token.ARROW {
|
||||
return nil, nil, fmt.Errorf("unsupported unary operator %s", expr.Op)
|
||||
}
|
||||
out, typ, err := destructuringOps(typ, expr.X)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ch, ok := typ.(*types.Chan)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("cannot receive from value of type %s", typ)
|
||||
}
|
||||
out = append(out, "recv")
|
||||
return out, ch.Elem().Underlying(), err
|
||||
case *ast.ParenExpr:
|
||||
return destructuringOps(typ, expr.X)
|
||||
case *ast.StarExpr:
|
||||
out, typ, err := destructuringOps(typ, expr.X)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ptr, ok := typ.(*types.Pointer)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("cannot dereference type %s", typ)
|
||||
}
|
||||
out = append(out, "load")
|
||||
return out, ptr.Elem().Underlying(), err
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("unsupported expression %T", expr)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *analysis) evalExtendedQuery(t types.Type, id nodeid, ops []interface{}) (types.Type, nodeid) {
|
||||
pid := id
|
||||
// TODO(dh): we're allocating intermediary nodes each time
|
||||
// evalExtendedQuery is called. We should probably only generate
|
||||
// them once per (v, ops) pair.
|
||||
for i := 1; i < len(ops); i++ {
|
||||
var nid nodeid
|
||||
switch ops[i] {
|
||||
case "recv":
|
||||
t = t.(*types.Chan).Elem().Underlying()
|
||||
nid = a.addNodes(t, "query.extended")
|
||||
a.load(nid, pid, 0, a.sizeof(t))
|
||||
case "field":
|
||||
i++ // fetch field index
|
||||
tt := t.(*types.Struct)
|
||||
idx := ops[i].(int)
|
||||
offset := a.offsetOf(t, idx)
|
||||
t = tt.Field(idx).Type().Underlying()
|
||||
nid = a.addNodes(t, "query.extended")
|
||||
a.copy(nid, pid+nodeid(offset), a.sizeof(t))
|
||||
case "arrayelem":
|
||||
t = t.(*types.Array).Elem().Underlying()
|
||||
nid = a.addNodes(t, "query.extended")
|
||||
a.copy(nid, 1+pid, a.sizeof(t))
|
||||
case "sliceelem":
|
||||
t = t.(*types.Slice).Elem().Underlying()
|
||||
nid = a.addNodes(t, "query.extended")
|
||||
a.load(nid, pid, 1, a.sizeof(t))
|
||||
case "mapelem":
|
||||
tt := t.(*types.Map)
|
||||
t = tt.Elem()
|
||||
ksize := a.sizeof(tt.Key())
|
||||
vsize := a.sizeof(tt.Elem())
|
||||
nid = a.addNodes(t, "query.extended")
|
||||
a.load(nid, pid, ksize, vsize)
|
||||
case "index":
|
||||
i++ // fetch index
|
||||
tt := t.(*types.Tuple)
|
||||
idx := ops[i].(int)
|
||||
t = tt.At(idx).Type().Underlying()
|
||||
nid = a.addNodes(t, "query.extended")
|
||||
a.copy(nid, pid+nodeid(idx), a.sizeof(t))
|
||||
case "load":
|
||||
t = t.(*types.Pointer).Elem().Underlying()
|
||||
nid = a.addNodes(t, "query.extended")
|
||||
a.load(nid, pid, 0, a.sizeof(t))
|
||||
default:
|
||||
// shouldn't happen
|
||||
panic(fmt.Sprintf("unknown op %q", ops[i]))
|
||||
}
|
||||
pid = nid
|
||||
}
|
||||
|
||||
return t, pid
|
||||
}
|
||||
1975
vendor/golang.org/x/tools/go/pointer/reflect.go
generated
vendored
Normal file
1975
vendor/golang.org/x/tools/go/pointer/reflect.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
370
vendor/golang.org/x/tools/go/pointer/solve.go
generated
vendored
Normal file
370
vendor/golang.org/x/tools/go/pointer/solve.go
generated
vendored
Normal file
@@ -0,0 +1,370 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pointer
|
||||
|
||||
// This file defines a naive Andersen-style solver for the inclusion
|
||||
// constraint system.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
type solverState struct {
|
||||
complex []constraint // complex constraints attached to this node
|
||||
copyTo nodeset // simple copy constraint edges
|
||||
pts nodeset // points-to set of this node
|
||||
prevPTS nodeset // pts(n) in previous iteration (for difference propagation)
|
||||
}
|
||||
|
||||
func (a *analysis) solve() {
|
||||
start("Solving")
|
||||
if a.log != nil {
|
||||
fmt.Fprintf(a.log, "\n\n==== Solving constraints\n\n")
|
||||
}
|
||||
|
||||
// Solver main loop.
|
||||
var delta nodeset
|
||||
for {
|
||||
// Add new constraints to the graph:
|
||||
// static constraints from SSA on round 1,
|
||||
// dynamic constraints from reflection thereafter.
|
||||
a.processNewConstraints()
|
||||
|
||||
var x int
|
||||
if !a.work.TakeMin(&x) {
|
||||
break // empty
|
||||
}
|
||||
id := nodeid(x)
|
||||
if a.log != nil {
|
||||
fmt.Fprintf(a.log, "\tnode n%d\n", id)
|
||||
}
|
||||
|
||||
n := a.nodes[id]
|
||||
|
||||
// Difference propagation.
|
||||
delta.Difference(&n.solve.pts.Sparse, &n.solve.prevPTS.Sparse)
|
||||
if delta.IsEmpty() {
|
||||
continue
|
||||
}
|
||||
if a.log != nil {
|
||||
fmt.Fprintf(a.log, "\t\tpts(n%d : %s) = %s + %s\n",
|
||||
id, n.typ, &delta, &n.solve.prevPTS)
|
||||
}
|
||||
n.solve.prevPTS.Copy(&n.solve.pts.Sparse)
|
||||
|
||||
// Apply all resolution rules attached to n.
|
||||
a.solveConstraints(n, &delta)
|
||||
|
||||
if a.log != nil {
|
||||
fmt.Fprintf(a.log, "\t\tpts(n%d) = %s\n", id, &n.solve.pts)
|
||||
}
|
||||
}
|
||||
|
||||
if !a.nodes[0].solve.pts.IsEmpty() {
|
||||
panic(fmt.Sprintf("pts(0) is nonempty: %s", &a.nodes[0].solve.pts))
|
||||
}
|
||||
|
||||
// Release working state (but keep final PTS).
|
||||
for _, n := range a.nodes {
|
||||
n.solve.complex = nil
|
||||
n.solve.copyTo.Clear()
|
||||
n.solve.prevPTS.Clear()
|
||||
}
|
||||
|
||||
if a.log != nil {
|
||||
fmt.Fprintf(a.log, "Solver done\n")
|
||||
|
||||
// Dump solution.
|
||||
for i, n := range a.nodes {
|
||||
if !n.solve.pts.IsEmpty() {
|
||||
fmt.Fprintf(a.log, "pts(n%d) = %s : %s\n", i, &n.solve.pts, n.typ)
|
||||
}
|
||||
}
|
||||
}
|
||||
stop("Solving")
|
||||
}
|
||||
|
||||
// processNewConstraints takes the new constraints from a.constraints
|
||||
// and adds them to the graph, ensuring
|
||||
// that new constraints are applied to pre-existing labels and
|
||||
// that pre-existing constraints are applied to new labels.
|
||||
//
|
||||
func (a *analysis) processNewConstraints() {
|
||||
// Take the slice of new constraints.
|
||||
// (May grow during call to solveConstraints.)
|
||||
constraints := a.constraints
|
||||
a.constraints = nil
|
||||
|
||||
// Initialize points-to sets from addr-of (base) constraints.
|
||||
for _, c := range constraints {
|
||||
if c, ok := c.(*addrConstraint); ok {
|
||||
dst := a.nodes[c.dst]
|
||||
dst.solve.pts.add(c.src)
|
||||
|
||||
// Populate the worklist with nodes that point to
|
||||
// something initially (due to addrConstraints) and
|
||||
// have other constraints attached.
|
||||
// (A no-op in round 1.)
|
||||
if !dst.solve.copyTo.IsEmpty() || len(dst.solve.complex) > 0 {
|
||||
a.addWork(c.dst)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Attach simple (copy) and complex constraints to nodes.
|
||||
var stale nodeset
|
||||
for _, c := range constraints {
|
||||
var id nodeid
|
||||
switch c := c.(type) {
|
||||
case *addrConstraint:
|
||||
// base constraints handled in previous loop
|
||||
continue
|
||||
case *copyConstraint:
|
||||
// simple (copy) constraint
|
||||
id = c.src
|
||||
a.nodes[id].solve.copyTo.add(c.dst)
|
||||
default:
|
||||
// complex constraint
|
||||
id = c.ptr()
|
||||
solve := a.nodes[id].solve
|
||||
solve.complex = append(solve.complex, c)
|
||||
}
|
||||
|
||||
if n := a.nodes[id]; !n.solve.pts.IsEmpty() {
|
||||
if !n.solve.prevPTS.IsEmpty() {
|
||||
stale.add(id)
|
||||
}
|
||||
a.addWork(id)
|
||||
}
|
||||
}
|
||||
// Apply new constraints to pre-existing PTS labels.
|
||||
var space [50]int
|
||||
for _, id := range stale.AppendTo(space[:0]) {
|
||||
n := a.nodes[nodeid(id)]
|
||||
a.solveConstraints(n, &n.solve.prevPTS)
|
||||
}
|
||||
}
|
||||
|
||||
// solveConstraints applies each resolution rule attached to node n to
|
||||
// the set of labels delta. It may generate new constraints in
|
||||
// a.constraints.
|
||||
//
|
||||
func (a *analysis) solveConstraints(n *node, delta *nodeset) {
|
||||
if delta.IsEmpty() {
|
||||
return
|
||||
}
|
||||
|
||||
// Process complex constraints dependent on n.
|
||||
for _, c := range n.solve.complex {
|
||||
if a.log != nil {
|
||||
fmt.Fprintf(a.log, "\t\tconstraint %s\n", c)
|
||||
}
|
||||
c.solve(a, delta)
|
||||
}
|
||||
|
||||
// Process copy constraints.
|
||||
var copySeen nodeset
|
||||
for _, x := range n.solve.copyTo.AppendTo(a.deltaSpace) {
|
||||
mid := nodeid(x)
|
||||
if copySeen.add(mid) {
|
||||
if a.nodes[mid].solve.pts.addAll(delta) {
|
||||
a.addWork(mid)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// addLabel adds label to the points-to set of ptr and reports whether the set grew.
|
||||
func (a *analysis) addLabel(ptr, label nodeid) bool {
|
||||
b := a.nodes[ptr].solve.pts.add(label)
|
||||
if b && a.log != nil {
|
||||
fmt.Fprintf(a.log, "\t\tpts(n%d) += n%d\n", ptr, label)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (a *analysis) addWork(id nodeid) {
|
||||
a.work.Insert(int(id))
|
||||
if a.log != nil {
|
||||
fmt.Fprintf(a.log, "\t\twork: n%d\n", id)
|
||||
}
|
||||
}
|
||||
|
||||
// onlineCopy adds a copy edge. It is called online, i.e. during
|
||||
// solving, so it adds edges and pts members directly rather than by
|
||||
// instantiating a 'constraint'.
|
||||
//
|
||||
// The size of the copy is implicitly 1.
|
||||
// It returns true if pts(dst) changed.
|
||||
//
|
||||
func (a *analysis) onlineCopy(dst, src nodeid) bool {
|
||||
if dst != src {
|
||||
if nsrc := a.nodes[src]; nsrc.solve.copyTo.add(dst) {
|
||||
if a.log != nil {
|
||||
fmt.Fprintf(a.log, "\t\t\tdynamic copy n%d <- n%d\n", dst, src)
|
||||
}
|
||||
// TODO(adonovan): most calls to onlineCopy
|
||||
// are followed by addWork, possibly batched
|
||||
// via a 'changed' flag; see if there's a
|
||||
// noticeable penalty to calling addWork here.
|
||||
return a.nodes[dst].solve.pts.addAll(&nsrc.solve.pts)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns sizeof.
|
||||
// Implicitly adds nodes to worklist.
|
||||
//
|
||||
// TODO(adonovan): now that we support a.copy() during solving, we
|
||||
// could eliminate onlineCopyN, but it's much slower. Investigate.
|
||||
//
|
||||
func (a *analysis) onlineCopyN(dst, src nodeid, sizeof uint32) uint32 {
|
||||
for i := uint32(0); i < sizeof; i++ {
|
||||
if a.onlineCopy(dst, src) {
|
||||
a.addWork(dst)
|
||||
}
|
||||
src++
|
||||
dst++
|
||||
}
|
||||
return sizeof
|
||||
}
|
||||
|
||||
func (c *loadConstraint) solve(a *analysis, delta *nodeset) {
|
||||
var changed bool
|
||||
for _, x := range delta.AppendTo(a.deltaSpace) {
|
||||
k := nodeid(x)
|
||||
koff := k + nodeid(c.offset)
|
||||
if a.onlineCopy(c.dst, koff) {
|
||||
changed = true
|
||||
}
|
||||
}
|
||||
if changed {
|
||||
a.addWork(c.dst)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *storeConstraint) solve(a *analysis, delta *nodeset) {
|
||||
for _, x := range delta.AppendTo(a.deltaSpace) {
|
||||
k := nodeid(x)
|
||||
koff := k + nodeid(c.offset)
|
||||
if a.onlineCopy(koff, c.src) {
|
||||
a.addWork(koff)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *offsetAddrConstraint) solve(a *analysis, delta *nodeset) {
|
||||
dst := a.nodes[c.dst]
|
||||
for _, x := range delta.AppendTo(a.deltaSpace) {
|
||||
k := nodeid(x)
|
||||
if dst.solve.pts.add(k + nodeid(c.offset)) {
|
||||
a.addWork(c.dst)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *typeFilterConstraint) solve(a *analysis, delta *nodeset) {
|
||||
for _, x := range delta.AppendTo(a.deltaSpace) {
|
||||
ifaceObj := nodeid(x)
|
||||
tDyn, _, indirect := a.taggedValue(ifaceObj)
|
||||
if indirect {
|
||||
// TODO(adonovan): we'll need to implement this
|
||||
// when we start creating indirect tagged objects.
|
||||
panic("indirect tagged object")
|
||||
}
|
||||
|
||||
if types.AssignableTo(tDyn, c.typ) {
|
||||
if a.addLabel(c.dst, ifaceObj) {
|
||||
a.addWork(c.dst)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *untagConstraint) solve(a *analysis, delta *nodeset) {
|
||||
predicate := types.AssignableTo
|
||||
if c.exact {
|
||||
predicate = types.Identical
|
||||
}
|
||||
for _, x := range delta.AppendTo(a.deltaSpace) {
|
||||
ifaceObj := nodeid(x)
|
||||
tDyn, v, indirect := a.taggedValue(ifaceObj)
|
||||
if indirect {
|
||||
// TODO(adonovan): we'll need to implement this
|
||||
// when we start creating indirect tagged objects.
|
||||
panic("indirect tagged object")
|
||||
}
|
||||
|
||||
if predicate(tDyn, c.typ) {
|
||||
// Copy payload sans tag to dst.
|
||||
//
|
||||
// TODO(adonovan): opt: if tDyn is
|
||||
// nonpointerlike we can skip this entire
|
||||
// constraint, perhaps. We only care about
|
||||
// pointers among the fields.
|
||||
a.onlineCopyN(c.dst, v, a.sizeof(tDyn))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *invokeConstraint) solve(a *analysis, delta *nodeset) {
|
||||
for _, x := range delta.AppendTo(a.deltaSpace) {
|
||||
ifaceObj := nodeid(x)
|
||||
tDyn, v, indirect := a.taggedValue(ifaceObj)
|
||||
if indirect {
|
||||
// TODO(adonovan): we may need to implement this if
|
||||
// we ever apply invokeConstraints to reflect.Value PTSs,
|
||||
// e.g. for (reflect.Value).Call.
|
||||
panic("indirect tagged object")
|
||||
}
|
||||
|
||||
// Look up the concrete method.
|
||||
fn := a.prog.LookupMethod(tDyn, c.method.Pkg(), c.method.Name())
|
||||
if fn == nil {
|
||||
panic(fmt.Sprintf("n%d: no ssa.Function for %s", c.iface, c.method))
|
||||
}
|
||||
sig := fn.Signature
|
||||
|
||||
fnObj := a.globalobj[fn] // dynamic calls use shared contour
|
||||
if fnObj == 0 {
|
||||
// a.objectNode(fn) was not called during gen phase.
|
||||
panic(fmt.Sprintf("a.globalobj[%s]==nil", fn))
|
||||
}
|
||||
|
||||
// Make callsite's fn variable point to identity of
|
||||
// concrete method. (There's no need to add it to
|
||||
// worklist since it never has attached constraints.)
|
||||
a.addLabel(c.params, fnObj)
|
||||
|
||||
// Extract value and connect to method's receiver.
|
||||
// Copy payload to method's receiver param (arg0).
|
||||
arg0 := a.funcParams(fnObj)
|
||||
recvSize := a.sizeof(sig.Recv().Type())
|
||||
a.onlineCopyN(arg0, v, recvSize)
|
||||
|
||||
src := c.params + 1 // skip past identity
|
||||
dst := arg0 + nodeid(recvSize)
|
||||
|
||||
// Copy caller's argument block to method formal parameters.
|
||||
paramsSize := a.sizeof(sig.Params())
|
||||
a.onlineCopyN(dst, src, paramsSize)
|
||||
src += nodeid(paramsSize)
|
||||
dst += nodeid(paramsSize)
|
||||
|
||||
// Copy method results to caller's result block.
|
||||
resultsSize := a.sizeof(sig.Results())
|
||||
a.onlineCopyN(src, dst, resultsSize)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *addrConstraint) solve(a *analysis, delta *nodeset) {
|
||||
panic("addr is not a complex constraint")
|
||||
}
|
||||
|
||||
func (c *copyConstraint) solve(a *analysis, delta *nodeset) {
|
||||
panic("copy is not a complex constraint")
|
||||
}
|
||||
313
vendor/golang.org/x/tools/go/pointer/util.go
generated
vendored
Normal file
313
vendor/golang.org/x/tools/go/pointer/util.go
generated
vendored
Normal file
@@ -0,0 +1,313 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pointer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/container/intsets"
|
||||
)
|
||||
|
||||
// CanPoint reports whether the type T is pointerlike,
|
||||
// for the purposes of this analysis.
|
||||
func CanPoint(T types.Type) bool {
|
||||
switch T := T.(type) {
|
||||
case *types.Named:
|
||||
if obj := T.Obj(); obj.Name() == "Value" && obj.Pkg().Path() == "reflect" {
|
||||
return true // treat reflect.Value like interface{}
|
||||
}
|
||||
return CanPoint(T.Underlying())
|
||||
case *types.Pointer, *types.Interface, *types.Map, *types.Chan, *types.Signature, *types.Slice:
|
||||
return true
|
||||
}
|
||||
|
||||
return false // array struct tuple builtin basic
|
||||
}
|
||||
|
||||
// CanHaveDynamicTypes reports whether the type T can "hold" dynamic types,
|
||||
// i.e. is an interface (incl. reflect.Type) or a reflect.Value.
|
||||
//
|
||||
func CanHaveDynamicTypes(T types.Type) bool {
|
||||
switch T := T.(type) {
|
||||
case *types.Named:
|
||||
if obj := T.Obj(); obj.Name() == "Value" && obj.Pkg().Path() == "reflect" {
|
||||
return true // reflect.Value
|
||||
}
|
||||
return CanHaveDynamicTypes(T.Underlying())
|
||||
case *types.Interface:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isInterface(T types.Type) bool { return types.IsInterface(T) }
|
||||
|
||||
// mustDeref returns the element type of its argument, which must be a
|
||||
// pointer; panic ensues otherwise.
|
||||
func mustDeref(typ types.Type) types.Type {
|
||||
return typ.Underlying().(*types.Pointer).Elem()
|
||||
}
|
||||
|
||||
// deref returns a pointer's element type; otherwise it returns typ.
|
||||
func deref(typ types.Type) types.Type {
|
||||
if p, ok := typ.Underlying().(*types.Pointer); ok {
|
||||
return p.Elem()
|
||||
}
|
||||
return typ
|
||||
}
|
||||
|
||||
// A fieldInfo describes one subelement (node) of the flattening-out
|
||||
// of a type T: the subelement's type and its path from the root of T.
|
||||
//
|
||||
// For example, for this type:
|
||||
// type line struct{ points []struct{x, y int} }
|
||||
// flatten() of the inner struct yields the following []fieldInfo:
|
||||
// struct{ x, y int } ""
|
||||
// int ".x"
|
||||
// int ".y"
|
||||
// and flatten(line) yields:
|
||||
// struct{ points []struct{x, y int} } ""
|
||||
// struct{ x, y int } ".points[*]"
|
||||
// int ".points[*].x
|
||||
// int ".points[*].y"
|
||||
//
|
||||
type fieldInfo struct {
|
||||
typ types.Type
|
||||
|
||||
// op and tail describe the path to the element (e.g. ".a#2.b[*].c").
|
||||
op interface{} // *Array: true; *Tuple: int; *Struct: *types.Var; *Named: nil
|
||||
tail *fieldInfo
|
||||
}
|
||||
|
||||
// path returns a user-friendly string describing the subelement path.
|
||||
//
|
||||
func (fi *fieldInfo) path() string {
|
||||
var buf bytes.Buffer
|
||||
for p := fi; p != nil; p = p.tail {
|
||||
switch op := p.op.(type) {
|
||||
case bool:
|
||||
fmt.Fprintf(&buf, "[*]")
|
||||
case int:
|
||||
fmt.Fprintf(&buf, "#%d", op)
|
||||
case *types.Var:
|
||||
fmt.Fprintf(&buf, ".%s", op.Name())
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// flatten returns a list of directly contained fields in the preorder
// traversal of the type tree of t.  The resulting elements are all
// scalars (basic types or pointerlike types), except for struct/array
// "identity" nodes, whose type is that of the aggregate.
//
// reflect.Value is considered pointerlike, similar to interface{}.
//
// Callers must not mutate the result.
//
func (a *analysis) flatten(t types.Type) []*fieldInfo {
	// Results are memoized in a.flattenMemo, keyed by type identity.
	fl, ok := a.flattenMemo[t]
	if !ok {
		switch t := t.(type) {
		case *types.Named:
			u := t.Underlying()
			if isInterface(u) {
				// Debuggability hack: don't remove
				// the named type from interfaces as
				// they're very verbose.
				fl = append(fl, &fieldInfo{typ: t})
			} else {
				fl = a.flatten(u)
			}

		case *types.Basic,
			*types.Signature,
			*types.Chan,
			*types.Map,
			*types.Interface,
			*types.Slice,
			*types.Pointer:
			// Scalar or pointerlike types: a single node, no path.
			fl = append(fl, &fieldInfo{typ: t})

		case *types.Array:
			fl = append(fl, &fieldInfo{typ: t}) // identity node
			// All array elements are conflated into one "[*]" step.
			for _, fi := range a.flatten(t.Elem()) {
				fl = append(fl, &fieldInfo{typ: fi.typ, op: true, tail: fi})
			}

		case *types.Struct:
			fl = append(fl, &fieldInfo{typ: t}) // identity node
			for i, n := 0, t.NumFields(); i < n; i++ {
				f := t.Field(i)
				for _, fi := range a.flatten(f.Type()) {
					fl = append(fl, &fieldInfo{typ: fi.typ, op: f, tail: fi})
				}
			}

		case *types.Tuple:
			// No identity node: tuples are never address-taken.
			n := t.Len()
			if n == 1 {
				// Don't add a fieldInfo link for singletons,
				// e.g. in params/results.
				fl = append(fl, a.flatten(t.At(0).Type())...)
			} else {
				for i := 0; i < n; i++ {
					f := t.At(i)
					for _, fi := range a.flatten(f.Type()) {
						fl = append(fl, &fieldInfo{typ: fi.typ, op: i, tail: fi})
					}
				}
			}

		default:
			panic(fmt.Sprintf("cannot flatten unsupported type %T", t))
		}

		a.flattenMemo[t] = fl
	}

	return fl
}
|
||||
|
||||
// sizeof returns the number of pointerlike abstractions (nodes) in the type t.
// It is the length of flatten(t), and is memoized via flatten's cache.
func (a *analysis) sizeof(t types.Type) uint32 {
	return uint32(len(a.flatten(t)))
}
|
||||
|
||||
// shouldTrack reports whether object type T contains (recursively)
// any fields whose addresses should be tracked.
func (a *analysis) shouldTrack(T types.Type) bool {
	if a.track == trackAll {
		return true // fast path
	}
	// Memoized per type in a.trackTypes.
	track, ok := a.trackTypes[T]
	if !ok {
		a.trackTypes[T] = true // break cycles conservatively
		// NB: reflect.Value, reflect.Type are pre-populated to true.
		for _, fi := range a.flatten(T) {
			switch ft := fi.typ.Underlying().(type) {
			case *types.Interface, *types.Signature:
				track = true // needed for callgraph
			case *types.Basic:
				// no-op
			case *types.Chan:
				track = a.track&trackChan != 0 || a.shouldTrack(ft.Elem())
			case *types.Map:
				track = a.track&trackMap != 0 || a.shouldTrack(ft.Key()) || a.shouldTrack(ft.Elem())
			case *types.Slice:
				track = a.track&trackSlice != 0 || a.shouldTrack(ft.Elem())
			case *types.Pointer:
				track = a.track&trackPtr != 0 || a.shouldTrack(ft.Elem())
			case *types.Array, *types.Struct:
				// No need to look at field types since they will follow (flattened).
			default:
				// Includes *types.Tuple, which are never address-taken.
				panic(ft)
			}
			// One trackable subelement suffices.
			if track {
				break
			}
		}
		// Overwrite the provisional cycle-breaking entry with the real answer.
		a.trackTypes[T] = track
		if !track && a.log != nil {
			fmt.Fprintf(a.log, "\ttype not tracked: %s\n", T)
		}
	}
	return track
}
|
||||
|
||||
// offsetOf returns the (abstract) offset of field index within struct
|
||||
// or tuple typ.
|
||||
func (a *analysis) offsetOf(typ types.Type, index int) uint32 {
|
||||
var offset uint32
|
||||
switch t := typ.Underlying().(type) {
|
||||
case *types.Tuple:
|
||||
for i := 0; i < index; i++ {
|
||||
offset += a.sizeof(t.At(i).Type())
|
||||
}
|
||||
case *types.Struct:
|
||||
offset++ // the node for the struct itself
|
||||
for i := 0; i < index; i++ {
|
||||
offset += a.sizeof(t.Field(i).Type())
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("offsetOf(%s : %T)", typ, typ))
|
||||
}
|
||||
return offset
|
||||
}
|
||||
|
||||
// sliceToArray returns the type representing the arrays to which
|
||||
// slice type slice points.
|
||||
func sliceToArray(slice types.Type) *types.Array {
|
||||
return types.NewArray(slice.Underlying().(*types.Slice).Elem(), 1)
|
||||
}
|
||||
|
||||
// Node set -------------------------------------------------------------------
|
||||
|
||||
// nodeset is a set of node ids, represented as a sparse bit set.
type nodeset struct {
	intsets.Sparse
}
|
||||
|
||||
func (ns *nodeset) String() string {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteRune('{')
|
||||
var space [50]int
|
||||
for i, n := range ns.AppendTo(space[:0]) {
|
||||
if i > 0 {
|
||||
buf.WriteString(", ")
|
||||
}
|
||||
buf.WriteRune('n')
|
||||
fmt.Fprintf(&buf, "%d", n)
|
||||
}
|
||||
buf.WriteRune('}')
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// add inserts n into the set and reports whether it was not already present.
func (ns *nodeset) add(n nodeid) bool {
	return ns.Sparse.Insert(int(n))
}
|
||||
|
||||
// addAll adds all elements of y to x and reports whether x changed.
func (x *nodeset) addAll(y *nodeset) bool {
	return x.UnionWith(&y.Sparse)
}
|
||||
|
||||
// Profiling & debugging -------------------------------------------------------
|
||||
|
||||
// timers records the start time of each named phase; see start/stop.
var timers = make(map[string]time.Time)
|
||||
|
||||
func start(name string) {
|
||||
if debugTimers {
|
||||
timers[name] = time.Now()
|
||||
log.Printf("%s...\n", name)
|
||||
}
|
||||
}
|
||||
|
||||
func stop(name string) {
|
||||
if debugTimers {
|
||||
log.Printf("%s took %s\n", name, time.Since(timers[name]))
|
||||
}
|
||||
}
|
||||
|
||||
// diff runs the command "diff a b" and reports its success
// (i.e. whether the files are identical). Output goes to stderr.
func diff(a, b string) bool {
	var cmd *exec.Cmd
	if runtime.GOOS == "plan9" {
		cmd = exec.Command("/bin/diff", "-c", a, b)
	} else {
		cmd = exec.Command("/usr/bin/diff", "-u", a, b)
	}
	cmd.Stdout = os.Stderr
	cmd.Stderr = os.Stderr
	return cmd.Run() == nil
}
|
||||
187
vendor/golang.org/x/tools/go/ssa/blockopt.go
generated
vendored
Normal file
187
vendor/golang.org/x/tools/go/ssa/blockopt.go
generated
vendored
Normal file
@@ -0,0 +1,187 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// Simple block optimizations to simplify the control flow graph.
|
||||
|
||||
// TODO(adonovan): opt: instead of creating several "unreachable" blocks
|
||||
// per function in the Builder, reuse a single one (e.g. at Blocks[1])
|
||||
// to reduce garbage.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// If true, perform sanity checking and show progress at each
|
||||
// successive iteration of optimizeBlocks. Very verbose.
|
||||
const debugBlockOpt = false
|
||||
|
||||
// markReachable sets Index=-1 for all blocks reachable from b.
// It assumes unvisited blocks have Index==0 (see deleteUnreachableBlocks,
// which establishes that invariant before calling).
func markReachable(b *BasicBlock) {
	b.Index = -1
	for _, succ := range b.Succs {
		if succ.Index == 0 {
			markReachable(succ)
		}
	}
}
|
||||
|
||||
// deleteUnreachableBlocks marks all reachable blocks of f and
// eliminates (nils) all others, including possibly cyclic subgraphs.
//
func deleteUnreachableBlocks(f *Function) {
	const white, black = 0, -1
	// We borrow b.Index temporarily as the mark bit.
	for _, b := range f.Blocks {
		b.Index = white
	}
	// Mark everything reachable from the entry block, and from the
	// recover block if the function has one.
	markReachable(f.Blocks[0])
	if f.Recover != nil {
		markReachable(f.Recover)
	}
	for i, b := range f.Blocks {
		if b.Index == white {
			// b is unreachable: sever its edges into the
			// reachable part of the graph before deleting it.
			for _, c := range b.Succs {
				if c.Index == black {
					c.removePred(b) // delete white->black edge
				}
			}
			if debugBlockOpt {
				fmt.Fprintln(os.Stderr, "unreachable", b)
			}
			f.Blocks[i] = nil // delete b
		}
	}
	f.removeNilBlocks()
}
|
||||
|
||||
// jumpThreading attempts to apply simple jump-threading to block b,
// in which a->b->c become a->c if b is just a Jump.
// The result is true if the optimization was applied.
//
func jumpThreading(f *Function, b *BasicBlock) bool {
	if b.Index == 0 {
		return false // don't apply to entry block
	}
	if b.Instrs == nil {
		return false
	}
	if _, ok := b.Instrs[0].(*Jump); !ok {
		return false // not just a jump
	}
	c := b.Succs[0]
	if c == b {
		return false // don't apply to degenerate jump-to-self.
	}
	if c.hasPhi() {
		return false // not sound without more effort
	}
	// Redirect every predecessor of b directly to c.
	for j, a := range b.Preds {
		a.replaceSucc(b, c)

		// If a now has two edges to c, replace its degenerate If by Jump.
		if len(a.Succs) == 2 && a.Succs[0] == c && a.Succs[1] == c {
			jump := new(Jump)
			jump.setBlock(a)
			a.Instrs[len(a.Instrs)-1] = jump
			a.Succs = a.Succs[:1]
			c.removePred(b)
		} else {
			// Keep c.Preds consistent: the first predecessor
			// takes over b's slot; later ones are appended.
			if j == 0 {
				c.replacePred(b, a)
			} else {
				c.Preds = append(c.Preds, a)
			}
		}

		if debugBlockOpt {
			fmt.Fprintln(os.Stderr, "jumpThreading", a, b, c)
		}
	}
	f.Blocks[b.Index] = nil // delete b
	return true
}
|
||||
|
||||
// fuseBlocks attempts to apply the block fusion optimization to block
// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1.
// The result is true if the optimization was applied.
//
func fuseBlocks(f *Function, a *BasicBlock) bool {
	// Applicable only when a has a unique successor b whose
	// unique predecessor is a.
	if len(a.Succs) != 1 {
		return false
	}
	b := a.Succs[0]
	if len(b.Preds) != 1 {
		return false
	}

	// Degenerate &&/|| ops may result in a straight-line CFG
	// containing φ-nodes. (Ideally we'd replace such them with
	// their sole operand but that requires Referrers, built later.)
	if b.hasPhi() {
		return false // not sound without further effort
	}

	// Eliminate jump at end of A, then copy all of B across.
	a.Instrs = append(a.Instrs[:len(a.Instrs)-1], b.Instrs...)
	for _, instr := range b.Instrs {
		instr.setBlock(a)
	}

	// A inherits B's successors
	a.Succs = append(a.succs2[:0], b.Succs...)

	// Fix up Preds links of all successors of B.
	for _, c := range b.Succs {
		c.replacePred(b, a)
	}

	if debugBlockOpt {
		fmt.Fprintln(os.Stderr, "fuseBlocks", a, b)
	}

	f.Blocks[b.Index] = nil // delete b
	return true
}
|
||||
|
||||
// optimizeBlocks() performs some simple block optimizations on a
// completed function: dead block elimination, block fusion, jump
// threading.
//
func optimizeBlocks(f *Function) {
	deleteUnreachableBlocks(f)

	// Loop until no further progress.
	changed := true
	for changed {
		changed = false

		if debugBlockOpt {
			f.WriteTo(os.Stderr)
			mustSanityCheck(f, nil)
		}

		for _, b := range f.Blocks {
			// f.Blocks will temporarily contain nils to indicate
			// deleted blocks; we remove them at the end.
			if b == nil {
				continue
			}

			// Fuse blocks.  b->c becomes bc.
			if fuseBlocks(f, b) {
				changed = true
			}

			// a->b->c becomes a->c if b contains only a Jump.
			if jumpThreading(f, b) {
				changed = true
				continue // (b was disconnected)
			}
		}
	}
	f.removeNilBlocks()
}
|
||||
2382
vendor/golang.org/x/tools/go/ssa/builder.go
generated
vendored
Normal file
2382
vendor/golang.org/x/tools/go/ssa/builder.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
169
vendor/golang.org/x/tools/go/ssa/const.go
generated
vendored
Normal file
169
vendor/golang.org/x/tools/go/ssa/const.go
generated
vendored
Normal file
@@ -0,0 +1,169 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines the Const SSA value type.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/constant"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// NewConst returns a new constant of the specified value and type.
// val must be valid according to the specification of Const.Value.
//
func NewConst(val constant.Value, typ types.Type) *Const {
	return &Const{typ, val}
}
|
||||
|
||||
// intConst returns an 'int' constant that evaluates to i.
// (i is an int64 in case the host is narrower than the target.)
func intConst(i int64) *Const {
	return NewConst(constant.MakeInt64(i), tInt)
}
|
||||
|
||||
// nilConst returns a nil constant of the specified type, which may
// be any reference type, including interfaces.
// (A nil Value field denotes nil; see Const.IsNil.)
//
func nilConst(typ types.Type) *Const {
	return NewConst(nil, typ)
}
|
||||
|
||||
// stringConst returns a 'string' constant that evaluates to s.
func stringConst(s string) *Const {
	return NewConst(constant.MakeString(s), tString)
}
|
||||
|
||||
// zeroConst returns a new "zero" constant of the specified type,
// which must not be an array or struct type: the zero values of
// aggregates are well-defined but cannot be represented by Const.
//
func zeroConst(t types.Type) *Const {
	switch t := t.(type) {
	case *types.Basic:
		switch {
		case t.Info()&types.IsBoolean != 0:
			return NewConst(constant.MakeBool(false), t)
		case t.Info()&types.IsNumeric != 0:
			return NewConst(constant.MakeInt64(0), t)
		case t.Info()&types.IsString != 0:
			return NewConst(constant.MakeString(""), t)
		case t.Kind() == types.UnsafePointer:
			// unsafe.Pointer and untyped nil share the nil
			// representation.
			fallthrough
		case t.Kind() == types.UntypedNil:
			return nilConst(t)
		default:
			panic(fmt.Sprint("zeroConst for unexpected type:", t))
		}
	case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
		return nilConst(t)
	case *types.Named:
		// Zero of a named type is the zero of its underlying
		// type, retyped as the named type.
		return NewConst(zeroConst(t.Underlying()).Value, t)
	case *types.Array, *types.Struct, *types.Tuple:
		panic(fmt.Sprint("zeroConst applied to aggregate:", t))
	}
	panic(fmt.Sprint("zeroConst: unexpected ", t))
}
|
||||
|
||||
func (c *Const) RelString(from *types.Package) string {
|
||||
var s string
|
||||
if c.Value == nil {
|
||||
s = "nil"
|
||||
} else if c.Value.Kind() == constant.String {
|
||||
s = constant.StringVal(c.Value)
|
||||
const max = 20
|
||||
// TODO(adonovan): don't cut a rune in half.
|
||||
if len(s) > max {
|
||||
s = s[:max-3] + "..." // abbreviate
|
||||
}
|
||||
s = strconv.Quote(s)
|
||||
} else {
|
||||
s = c.Value.String()
|
||||
}
|
||||
return s + ":" + relType(c.Type(), from)
|
||||
}
|
||||
|
||||
// Name returns the full name of this constant (its RelString with no
// relativizing package).
func (c *Const) Name() string {
	return c.RelString(nil)
}
|
||||
|
||||
// String returns the same representation as Name.
func (c *Const) String() string {
	return c.Name()
}
|
||||
|
||||
// Type returns the type of this constant.
func (c *Const) Type() types.Type {
	return c.typ
}
|
||||
|
||||
// Referrers always returns nil; referrers of constants are not recorded.
func (c *Const) Referrers() *[]Instruction {
	return nil
}
|
||||
|
||||
// Parent always returns nil; a constant belongs to no single function.
func (c *Const) Parent() *Function { return nil }
|
||||
|
||||
// Pos always returns token.NoPos; constants carry no position.
func (c *Const) Pos() token.Pos {
	return token.NoPos
}
|
||||
|
||||
// IsNil returns true if this constant represents a typed or untyped nil value.
// (Such constants have a nil Value field; see nilConst.)
func (c *Const) IsNil() bool {
	return c.Value == nil
}
|
||||
|
||||
// TODO(adonovan): move everything below into golang.org/x/tools/go/ssa/interp.
|
||||
|
||||
// Int64 returns the numeric value of this constant truncated to fit
// a signed 64-bit integer.
//
func (c *Const) Int64() int64 {
	switch x := constant.ToInt(c.Value); x.Kind() {
	case constant.Int:
		if i, ok := constant.Int64Val(x); ok {
			return i
		}
		return 0 // out of int64 range
	case constant.Float:
		// Truncate toward zero, as Go conversion does.
		f, _ := constant.Float64Val(x)
		return int64(f)
	}
	panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
}
|
||||
|
||||
// Uint64 returns the numeric value of this constant truncated to fit
// an unsigned 64-bit integer.
//
func (c *Const) Uint64() uint64 {
	switch x := constant.ToInt(c.Value); x.Kind() {
	case constant.Int:
		if u, ok := constant.Uint64Val(x); ok {
			return u
		}
		return 0 // out of uint64 range
	case constant.Float:
		// Truncate toward zero, as Go conversion does.
		f, _ := constant.Float64Val(x)
		return uint64(f)
	}
	panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
}
|
||||
|
||||
// Float64 returns the numeric value of this constant truncated to fit
// a float64.
//
func (c *Const) Float64() float64 {
	f, _ := constant.Float64Val(c.Value)
	return f
}
|
||||
|
||||
// Complex128 returns the complex value of this constant truncated to
// fit a complex128.
//
func (c *Const) Complex128() complex128 {
	re, _ := constant.Float64Val(constant.Real(c.Value))
	im, _ := constant.Float64Val(constant.Imag(c.Value))
	return complex(re, im)
}
|
||||
270
vendor/golang.org/x/tools/go/ssa/create.go
generated
vendored
Normal file
270
vendor/golang.org/x/tools/go/ssa/create.go
generated
vendored
Normal file
@@ -0,0 +1,270 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file implements the CREATE phase of SSA construction.
|
||||
// See builder.go for explanation.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/tools/go/types/typeutil"
|
||||
)
|
||||
|
||||
// NewProgram returns a new SSA Program.
//
// mode controls diagnostics and checking during SSA construction.
//
func NewProgram(fset *token.FileSet, mode BuilderMode) *Program {
	prog := &Program{
		Fset:     fset,
		imported: make(map[string]*Package),
		packages: make(map[*types.Package]*Package),
		thunks:   make(map[selectionKey]*Function),
		bounds:   make(map[*types.Func]*Function),
		mode:     mode,
	}

	// Both type maps share one hasher.
	h := typeutil.MakeHasher() // protected by methodsMu, in effect
	prog.methodSets.SetHasher(h)
	prog.canon.SetHasher(h)

	return prog
}
|
||||
|
||||
// memberFromObject populates package pkg with a member for the
// typechecker object obj.
//
// For objects from Go source code, syntax is the associated syntax
// tree (for funcs and vars only); it will be used during the build
// phase.
//
func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) {
	name := obj.Name()
	switch obj := obj.(type) {
	case *types.Builtin:
		// Builtins occur only in the "unsafe" pseudo-package.
		if pkg.Pkg != types.Unsafe {
			panic("unexpected builtin object: " + obj.String())
		}

	case *types.TypeName:
		pkg.Members[name] = &Type{
			object: obj,
			pkg:    pkg,
		}

	case *types.Const:
		c := &NamedConst{
			object: obj,
			Value:  NewConst(obj.Val(), obj.Type()),
			pkg:    pkg,
		}
		pkg.values[obj] = c.Value
		pkg.Members[name] = c

	case *types.Var:
		g := &Global{
			Pkg:    pkg,
			name:   name,
			object: obj,
			typ:    types.NewPointer(obj.Type()), // address
			pos:    obj.Pos(),
		}
		pkg.values[obj] = g
		pkg.Members[name] = g

	case *types.Func:
		sig := obj.Type().(*types.Signature)
		if sig.Recv() == nil && name == "init" {
			// Package may have multiple "init" funcs;
			// disambiguate them as init#1, init#2, ...
			pkg.ninit++
			name = fmt.Sprintf("init#%d", pkg.ninit)
		}
		fn := &Function{
			name:      name,
			object:    obj,
			Signature: sig,
			syntax:    syntax,
			pos:       obj.Pos(),
			Pkg:       pkg,
			Prog:      pkg.Prog,
		}
		if syntax == nil {
			fn.Synthetic = "loaded from gc object file"
		}

		pkg.values[obj] = fn
		if sig.Recv() == nil {
			pkg.Members[name] = fn // package-level function
		}

	default: // (incl. *types.Package)
		panic("unexpected Object type: " + obj.String())
	}
}
|
||||
|
||||
// membersFromDecl populates package pkg with members for each
// typechecker object (var, func, const or type) associated with the
// specified decl.
//
// Blank identifiers ("_") declare no member and are skipped throughout.
//
func membersFromDecl(pkg *Package, decl ast.Decl) {
	switch decl := decl.(type) {
	case *ast.GenDecl: // import, const, type or var
		switch decl.Tok {
		case token.CONST:
			for _, spec := range decl.Specs {
				for _, id := range spec.(*ast.ValueSpec).Names {
					if !isBlankIdent(id) {
						memberFromObject(pkg, pkg.info.Defs[id], nil)
					}
				}
			}

		case token.VAR:
			for _, spec := range decl.Specs {
				for _, id := range spec.(*ast.ValueSpec).Names {
					if !isBlankIdent(id) {
						// Vars keep their spec for the build phase.
						memberFromObject(pkg, pkg.info.Defs[id], spec)
					}
				}
			}

		case token.TYPE:
			for _, spec := range decl.Specs {
				id := spec.(*ast.TypeSpec).Name
				if !isBlankIdent(id) {
					memberFromObject(pkg, pkg.info.Defs[id], nil)
				}
			}
		}

	case *ast.FuncDecl:
		id := decl.Name
		if !isBlankIdent(id) {
			// Funcs keep their decl for the build phase.
			memberFromObject(pkg, pkg.info.Defs[id], decl)
		}
	}
}
|
||||
|
||||
// CreatePackage constructs and returns an SSA Package from the
// specified type-checked, error-free file ASTs, and populates its
// Members mapping.
//
// importable determines whether this package should be returned by a
// subsequent call to ImportedPackage(pkg.Path()).
//
// The real work of building SSA form for each function is not done
// until a subsequent call to Package.Build().
//
func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package {
	p := &Package{
		Prog:    prog,
		Members: make(map[string]Member),
		values:  make(map[types.Object]Value),
		Pkg:     pkg,
		info:    info,  // transient (CREATE and BUILD phases)
		files:   files, // transient (CREATE and BUILD phases)
	}

	// Add init() function.
	p.init = &Function{
		name:      "init",
		Signature: new(types.Signature),
		Synthetic: "package initializer",
		Pkg:       p,
		Prog:      prog,
	}
	p.Members[p.init.name] = p.init

	// CREATE phase.
	// Allocate all package members: vars, funcs, consts and types.
	if len(files) > 0 {
		// Go source package.
		for _, file := range files {
			for _, decl := range file.Decls {
				membersFromDecl(p, decl)
			}
		}
	} else {
		// GC-compiled binary package (or "unsafe")
		// No code.
		// No position information.
		// Members are enumerated from the package scope instead
		// of from syntax.
		scope := p.Pkg.Scope()
		for _, name := range scope.Names() {
			obj := scope.Lookup(name)
			memberFromObject(p, obj, nil)
			if obj, ok := obj.(*types.TypeName); ok {
				if named, ok := obj.Type().(*types.Named); ok {
					for i, n := 0, named.NumMethods(); i < n; i++ {
						memberFromObject(p, named.Method(i), nil)
					}
				}
			}
		}
	}

	if prog.mode&BareInits == 0 {
		// Add initializer guard variable.
		initguard := &Global{
			Pkg:  p,
			name: "init$guard",
			typ:  types.NewPointer(tBool),
		}
		p.Members[initguard.Name()] = initguard
	}

	if prog.mode&GlobalDebug != 0 {
		p.SetDebugMode(true)
	}

	if prog.mode&PrintPackages != 0 {
		printMu.Lock()
		p.WriteTo(os.Stdout)
		printMu.Unlock()
	}

	// Register the package with the program.
	if importable {
		prog.imported[p.Pkg.Path()] = p
	}
	prog.packages[p.Pkg] = p

	return p
}
|
||||
|
||||
// printMu serializes printing of Packages/Functions to stdout.
var printMu sync.Mutex
|
||||
|
||||
// AllPackages returns a new slice containing all packages in the
|
||||
// program prog in unspecified order.
|
||||
//
|
||||
func (prog *Program) AllPackages() []*Package {
|
||||
pkgs := make([]*Package, 0, len(prog.packages))
|
||||
for _, pkg := range prog.packages {
|
||||
pkgs = append(pkgs, pkg)
|
||||
}
|
||||
return pkgs
|
||||
}
|
||||
|
||||
// ImportedPackage returns the importable Package whose PkgPath
// is path, or nil if no such Package has been created.
//
// A parameter to CreatePackage determines whether a package should be
// considered importable. For example, no import declaration can resolve
// to the ad-hoc main package created by 'go build foo.go'.
//
// TODO(adonovan): rethink this function and the "importable" concept;
// most packages are importable. This function assumes that all
// types.Package.Path values are unique within the ssa.Program, which is
// false---yet this function remains very convenient.
// Clients should use (*Program).Package instead where possible.
// SSA doesn't really need a string-keyed map of packages.
//
func (prog *Program) ImportedPackage(path string) *Package {
	return prog.imported[path]
}
|
||||
125
vendor/golang.org/x/tools/go/ssa/doc.go
generated
vendored
Normal file
125
vendor/golang.org/x/tools/go/ssa/doc.go
generated
vendored
Normal file
@@ -0,0 +1,125 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package ssa defines a representation of the elements of Go programs
|
||||
// (packages, types, functions, variables and constants) using a
|
||||
// static single-assignment (SSA) form intermediate representation
|
||||
// (IR) for the bodies of functions.
|
||||
//
|
||||
// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE.
|
||||
//
|
||||
// For an introduction to SSA form, see
|
||||
// http://en.wikipedia.org/wiki/Static_single_assignment_form.
|
||||
// This page provides a broader reading list:
|
||||
// http://www.dcs.gla.ac.uk/~jsinger/ssa.html.
|
||||
//
|
||||
// The level of abstraction of the SSA form is intentionally close to
|
||||
// the source language to facilitate construction of source analysis
|
||||
// tools. It is not intended for machine code generation.
|
||||
//
|
||||
// All looping, branching and switching constructs are replaced with
|
||||
// unstructured control flow. Higher-level control flow constructs
|
||||
// such as multi-way branch can be reconstructed as needed; see
|
||||
// ssautil.Switches() for an example.
|
||||
//
|
||||
// The simplest way to create the SSA representation of a package is
|
||||
// to load typed syntax trees using golang.org/x/tools/go/packages, then
|
||||
// invoke the ssautil.Packages helper function. See ExampleLoadPackages
|
||||
// and ExampleWholeProgram for examples.
|
||||
// The resulting ssa.Program contains all the packages and their
|
||||
// members, but SSA code is not created for function bodies until a
|
||||
// subsequent call to (*Package).Build or (*Program).Build.
|
||||
//
|
||||
// The builder initially builds a naive SSA form in which all local
|
||||
// variables are addresses of stack locations with explicit loads and
|
||||
// stores. Registerisation of eligible locals and φ-node insertion
|
||||
// using dominance and dataflow are then performed as a second pass
|
||||
// called "lifting" to improve the accuracy and performance of
|
||||
// subsequent analyses; this pass can be skipped by setting the
|
||||
// NaiveForm builder flag.
|
||||
//
|
||||
// The primary interfaces of this package are:
|
||||
//
|
||||
// - Member: a named member of a Go package.
|
||||
// - Value: an expression that yields a value.
|
||||
// - Instruction: a statement that consumes values and performs computation.
|
||||
// - Node: a Value or Instruction (emphasizing its membership in the SSA value graph)
|
||||
//
|
||||
// A computation that yields a result implements both the Value and
|
||||
// Instruction interfaces. The following table shows for each
|
||||
// concrete type which of these interfaces it implements.
|
||||
//
|
||||
// Value? Instruction? Member?
|
||||
// *Alloc ✔ ✔
|
||||
// *BinOp ✔ ✔
|
||||
// *Builtin ✔
|
||||
// *Call ✔ ✔
|
||||
// *ChangeInterface ✔ ✔
|
||||
// *ChangeType ✔ ✔
|
||||
// *Const ✔
|
||||
// *Convert ✔ ✔
|
||||
// *DebugRef ✔
|
||||
// *Defer ✔
|
||||
// *Extract ✔ ✔
|
||||
// *Field ✔ ✔
|
||||
// *FieldAddr ✔ ✔
|
||||
// *FreeVar ✔
|
||||
// *Function ✔ ✔ (func)
|
||||
// *Global ✔ ✔ (var)
|
||||
// *Go ✔
|
||||
// *If ✔
|
||||
// *Index ✔ ✔
|
||||
// *IndexAddr ✔ ✔
|
||||
// *Jump ✔
|
||||
// *Lookup ✔ ✔
|
||||
// *MakeChan ✔ ✔
|
||||
// *MakeClosure ✔ ✔
|
||||
// *MakeInterface ✔ ✔
|
||||
// *MakeMap ✔ ✔
|
||||
// *MakeSlice ✔ ✔
|
||||
// *MapUpdate ✔
|
||||
// *NamedConst ✔ (const)
|
||||
// *Next ✔ ✔
|
||||
// *Panic ✔
|
||||
// *Parameter ✔
|
||||
// *Phi ✔ ✔
|
||||
// *Range ✔ ✔
|
||||
// *Return ✔
|
||||
// *RunDefers ✔
|
||||
// *Select ✔ ✔
|
||||
// *Send ✔
|
||||
// *Slice ✔ ✔
|
||||
// *Store ✔
|
||||
// *Type ✔ (type)
|
||||
// *TypeAssert ✔ ✔
|
||||
// *UnOp ✔ ✔
|
||||
//
|
||||
// Other key types in this package include: Program, Package, Function
|
||||
// and BasicBlock.
|
||||
//
|
||||
// The program representation constructed by this package is fully
|
||||
// resolved internally, i.e. it does not rely on the names of Values,
|
||||
// Packages, Functions, Types or BasicBlocks for the correct
|
||||
// interpretation of the program. Only the identities of objects and
|
||||
// the topology of the SSA and type graphs are semantically
|
||||
// significant. (There is one exception: Ids, used to identify field
|
||||
// and method names, contain strings.) Avoidance of name-based
|
||||
// operations simplifies the implementation of subsequent passes and
|
||||
// can make them very efficient. Many objects are nonetheless named
|
||||
// to aid in debugging, but it is not essential that the names be
|
||||
// either accurate or unambiguous. The public API exposes a number of
|
||||
// name-based maps for client convenience.
|
||||
//
|
||||
// The ssa/ssautil package provides various utilities that depend only
|
||||
// on the public API of this package.
|
||||
//
|
||||
// TODO(adonovan): Consider the exceptional control-flow implications
|
||||
// of defer and recover().
|
||||
//
|
||||
// TODO(adonovan): write a how-to document for all the various cases
|
||||
// of trying to determine corresponding elements across the four
|
||||
// domains of source locations, ast.Nodes, types.Objects,
|
||||
// ssa.Values/Instructions.
|
||||
//
|
||||
package ssa // import "golang.org/x/tools/go/ssa"
|
||||
341
vendor/golang.org/x/tools/go/ssa/dom.go
generated
vendored
Normal file
341
vendor/golang.org/x/tools/go/ssa/dom.go
generated
vendored
Normal file
@@ -0,0 +1,341 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines algorithms related to dominance.
|
||||
|
||||
// Dominator tree construction ----------------------------------------
|
||||
//
|
||||
// We use the algorithm described in Lengauer & Tarjan. 1979. A fast
|
||||
// algorithm for finding dominators in a flowgraph.
|
||||
// http://doi.acm.org/10.1145/357062.357071
|
||||
//
|
||||
// We also apply the optimizations to SLT described in Georgiadis et
|
||||
// al, Finding Dominators in Practice, JGAA 2006,
|
||||
// http://jgaa.info/accepted/2006/GeorgiadisTarjanWerneck2006.10.1.pdf
|
||||
// to avoid the need for buckets of size > 1.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Idom returns the block that immediately dominates b:
// its parent in the dominator tree, if any.
// Neither the entry node (b.Index==0) nor recover node
// (b==b.Parent().Recover()) have a parent.
//
// The result is nil for the roots of the dominator forest.
func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom }
|
||||
|
||||
// Dominees returns the list of blocks that b immediately dominates:
// its children in the dominator tree.
//
// The returned slice is the tree's internal storage; callers must not mutate it.
func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children }
|
||||
|
||||
// Dominates reports whether b dominates c.
|
||||
func (b *BasicBlock) Dominates(c *BasicBlock) bool {
|
||||
return b.dom.pre <= c.dom.pre && c.dom.post <= b.dom.post
|
||||
}
|
||||
|
||||
// byDomPreorder sorts blocks by their dominator-tree preorder number.
type byDomPreorder []*BasicBlock

func (a byDomPreorder) Len() int           { return len(a) }
func (a byDomPreorder) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre }
|
||||
|
||||
// DomPreorder returns a new slice containing the blocks of f in
|
||||
// dominator tree preorder.
|
||||
//
|
||||
func (f *Function) DomPreorder() []*BasicBlock {
|
||||
n := len(f.Blocks)
|
||||
order := make(byDomPreorder, n, n)
|
||||
copy(order, f.Blocks)
|
||||
sort.Sort(order)
|
||||
return order
|
||||
}
|
||||
|
||||
// domInfo contains a BasicBlock's dominance information.
type domInfo struct {
	idom     *BasicBlock   // immediate dominator (parent in domtree); nil for roots
	children []*BasicBlock // nodes immediately dominated by this one
	pre, post int32        // pre- and post-order numbering within domtree
}
|
||||
|
||||
// ltState holds the working state for Lengauer-Tarjan algorithm
// (during which domInfo.pre is repurposed for CFG DFS preorder number).
type ltState struct {
	// Each slice is indexed by b.Index.
	sdom     []*BasicBlock // b's semidominator
	parent   []*BasicBlock // b's parent in DFS traversal of CFG
	ancestor []*BasicBlock // b's ancestor with least sdom
}
|
||||
|
||||
// dfs implements the depth-first search part of the LT algorithm.
// It numbers v and its CFG descendants starting at i, records them in
// preorder, and returns the next unused preorder number.
// lt.sdom doubles as the "visited" marker during the walk.
func (lt *ltState) dfs(v *BasicBlock, i int32, preorder []*BasicBlock) int32 {
	preorder[i] = v
	v.dom.pre = i // For now: DFS preorder of spanning tree of CFG
	i++
	lt.sdom[v.Index] = v
	lt.link(nil, v)
	for _, w := range v.Succs {
		if lt.sdom[w.Index] == nil {
			lt.parent[w.Index] = v
			i = lt.dfs(w, i, preorder)
		}
	}
	return i
}
|
||||
|
||||
// eval implements the EVAL part of the LT algorithm.
// It walks the ancestor forest from v and returns the node whose
// semidominator has the least DFS preorder number.
func (lt *ltState) eval(v *BasicBlock) *BasicBlock {
	// TODO(adonovan): opt: do path compression per simple LT.
	u := v
	for ; lt.ancestor[v.Index] != nil; v = lt.ancestor[v.Index] {
		if lt.sdom[v.Index].dom.pre < lt.sdom[u.Index].dom.pre {
			u = v
		}
	}
	return u
}
|
||||
|
||||
// link implements the LINK part of the LT algorithm.
// It makes v the forest ancestor of w (v may be nil for roots).
func (lt *ltState) link(v, w *BasicBlock) {
	lt.ancestor[w.Index] = v
}
|
||||
|
||||
// buildDomTree computes the dominator tree of f using the LT algorithm.
// Precondition: all blocks are reachable (e.g. optimizeBlocks has been run).
//
// On return, every block's dom field (idom, children, pre, post) is populated.
func buildDomTree(f *Function) {
	// The step numbers refer to the original LT paper; the
	// reordering is due to Georgiadis.

	// Clear any previous domInfo.
	for _, b := range f.Blocks {
		b.dom = domInfo{}
	}

	n := len(f.Blocks)
	// Allocate space for 5 contiguous [n]*BasicBlock arrays:
	// sdom, parent, ancestor, preorder, buckets.
	space := make([]*BasicBlock, 5*n, 5*n)
	lt := ltState{
		sdom:     space[0:n],
		parent:   space[n : 2*n],
		ancestor: space[2*n : 3*n],
	}

	// Step 1. Number vertices by depth-first preorder.
	preorder := space[3*n : 4*n]
	root := f.Blocks[0]
	prenum := lt.dfs(root, 0, preorder)
	recover := f.Recover // NB: local shadows the built-in recover
	if recover != nil {
		// The recover block is an independent root of the dominator forest.
		lt.dfs(recover, prenum, preorder)
	}

	buckets := space[4*n : 5*n]
	copy(buckets, preorder)

	// In reverse preorder...
	for i := int32(n) - 1; i > 0; i-- {
		w := preorder[i]

		// Step 3. Implicitly define the immediate dominator of each node.
		// (buckets threads, via preorder numbers, the set of nodes whose
		// semidominator is w.)
		for v := buckets[i]; v != w; v = buckets[v.dom.pre] {
			u := lt.eval(v)
			if lt.sdom[u.Index].dom.pre < i {
				v.dom.idom = u
			} else {
				v.dom.idom = w
			}
		}

		// Step 2. Compute the semidominators of all nodes.
		lt.sdom[w.Index] = lt.parent[w.Index]
		for _, v := range w.Preds {
			u := lt.eval(v)
			if lt.sdom[u.Index].dom.pre < lt.sdom[w.Index].dom.pre {
				lt.sdom[w.Index] = lt.sdom[u.Index]
			}
		}

		lt.link(lt.parent[w.Index], w)

		if lt.parent[w.Index] == lt.sdom[w.Index] {
			w.dom.idom = lt.parent[w.Index]
		} else {
			buckets[i] = buckets[lt.sdom[w.Index].dom.pre]
			buckets[lt.sdom[w.Index].dom.pre] = w
		}
	}

	// The final 'Step 3' is now outside the loop.
	for v := buckets[0]; v != root; v = buckets[v.dom.pre] {
		v.dom.idom = root
	}

	// Step 4. Explicitly define the immediate dominator of each
	// node, in preorder.
	for _, w := range preorder[1:] {
		if w == root || w == recover {
			w.dom.idom = nil
		} else {
			if w.dom.idom != lt.sdom[w.Index] {
				w.dom.idom = w.dom.idom.dom.idom
			}
			// Calculate Children relation as inverse of Idom.
			w.dom.idom.dom.children = append(w.dom.idom.dom.children, w)
		}
	}

	// Replace the scratch DFS numbering with the final domtree numbering.
	pre, post := numberDomTree(root, 0, 0)
	if recover != nil {
		numberDomTree(recover, pre, post)
	}

	// printDomTreeDot(os.Stderr, f)        // debugging
	// printDomTreeText(os.Stderr, root, 0) // debugging

	if f.Prog.mode&SanityCheckFunctions != 0 {
		sanityCheckDomTree(f)
	}
}
|
||||
|
||||
// numberDomTree sets the pre- and post-order numbers of a depth-first
// traversal of the dominator tree rooted at v.  These are used to
// answer dominance queries in constant time.
//
// It returns the next unused (pre, post) numbers, so a second root
// (the recover block) can continue the numbering.
func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) {
	v.dom.pre = pre
	pre++
	for _, child := range v.dom.children {
		pre, post = numberDomTree(child, pre, post)
	}
	v.dom.post = post
	post++
	return pre, post
}
|
||||
|
||||
// Testing utilities ----------------------------------------
|
||||
|
||||
// sanityCheckDomTree checks the correctness of the dominator tree
// computed by the LT algorithm by comparing against the dominance
// relation computed by a naive Kildall-style forward dataflow
// analysis (Algorithm 10.16 from the "Dragon" book).
//
// It prints discrepancies to stderr and panics if any are found.
func sanityCheckDomTree(f *Function) {
	n := len(f.Blocks)

	// D[i] is the set of blocks that dominate f.Blocks[i],
	// represented as a bit-set of block indices.
	D := make([]big.Int, n)

	one := big.NewInt(1)

	// all is the set of all blocks; constant.
	var all big.Int
	all.Set(one).Lsh(&all, uint(n)).Sub(&all, one)

	// Initialization.
	for i, b := range f.Blocks {
		if i == 0 || b == f.Recover {
			// A root is dominated only by itself.
			// (NOTE(review): this reads bit 0 of D[0], i.e. treats every
			// root like the entry node; harmless because Recover blocks
			// are skipped in the comparison below — confirm upstream.)
			D[i].SetBit(&D[0], 0, 1)
		} else {
			// All other blocks are (initially) dominated
			// by every block.
			D[i].Set(&all)
		}
	}

	// Iteration until fixed point.
	for changed := true; changed; {
		changed = false
		for i, b := range f.Blocks {
			if i == 0 || b == f.Recover {
				continue
			}
			// Compute intersection across predecessors.
			var x big.Int
			x.Set(&all)
			for _, pred := range b.Preds {
				x.And(&x, &D[pred.Index])
			}
			x.SetBit(&x, i, 1) // a block always dominates itself.
			if D[i].Cmp(&x) != 0 {
				D[i].Set(&x)
				changed = true
			}
		}
	}

	// Check the entire relation. O(n^2).
	// The Recover block (if any) must be treated specially so we skip it.
	ok := true
	for i := 0; i < n; i++ {
		for j := 0; j < n; j++ {
			b, c := f.Blocks[i], f.Blocks[j]
			if c == f.Recover {
				continue
			}
			actual := b.Dominates(c)
			expected := D[j].Bit(i) == 1
			if actual != expected {
				fmt.Fprintf(os.Stderr, "dominates(%s, %s)==%t, want %t\n", b, c, actual, expected)
				ok = false
			}
		}
	}

	// Also verify that the domtree pre-numbering is a permutation.
	preorder := f.DomPreorder()
	for _, b := range f.Blocks {
		if got := preorder[b.dom.pre]; got != b {
			fmt.Fprintf(os.Stderr, "preorder[%d]==%s, want %s\n", b.dom.pre, got, b)
			ok = false
		}
	}

	if !ok {
		panic("sanityCheckDomTree failed for " + f.String())
	}

}
|
||||
|
||||
// Printing functions ----------------------------------------
|
||||
|
||||
// printDomTreeText prints the dominator tree rooted at v as text,
// using four spaces of indentation per tree level.
func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) {
	fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v)
	for _, child := range v.dom.children {
		printDomTreeText(buf, child, indent+1)
	}
}
|
||||
|
||||
// printDomTreeDot prints the dominator tree of f in AT&T GraphViz
// (.dot) format.  Nodes are keyed by domtree preorder number; solid
// edges are domtree edges, dotted edges are CFG edges.
func printDomTreeDot(buf *bytes.Buffer, f *Function) {
	fmt.Fprintln(buf, "//", f)
	fmt.Fprintln(buf, "digraph domtree {")
	for i, b := range f.Blocks {
		v := b.dom
		fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post)
		// TODO(adonovan): improve appearance of edges
		// belonging to both dominator tree and CFG.

		// Dominator tree edge.
		if i != 0 {
			fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.dom.pre, v.pre)
		}
		// CFG edges.
		for _, pred := range b.Preds {
			fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.dom.pre, v.pre)
		}
	}
	fmt.Fprintln(buf, "}")
}
|
||||
468
vendor/golang.org/x/tools/go/ssa/emit.go
generated
vendored
Normal file
468
vendor/golang.org/x/tools/go/ssa/emit.go
generated
vendored
Normal file
@@ -0,0 +1,468 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// Helpers for emitting SSA instructions.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// emitNew emits to f a new (heap Alloc) instruction allocating an
|
||||
// object of type typ. pos is the optional source location.
|
||||
//
|
||||
func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc {
|
||||
v := &Alloc{Heap: true}
|
||||
v.setType(types.NewPointer(typ))
|
||||
v.setPos(pos)
|
||||
f.emit(v)
|
||||
return v
|
||||
}
|
||||
|
||||
// emitLoad emits to f an instruction to load the address addr into a
|
||||
// new temporary, and returns the value so defined.
|
||||
//
|
||||
func emitLoad(f *Function, addr Value) *UnOp {
|
||||
v := &UnOp{Op: token.MUL, X: addr}
|
||||
v.setType(deref(addr.Type()))
|
||||
f.emit(v)
|
||||
return v
|
||||
}
|
||||
|
||||
// emitDebugRef emits to f a DebugRef pseudo-instruction associating
// expression e with value v.
//
// It is a no-op unless debug info is enabled for f's package, and it
// silently skips blank identifiers and expressions that denote
// constants, nil, or built-ins (which have no SSA value).
func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) {
	if !f.debugInfo() {
		return // debugging not enabled
	}
	if v == nil || e == nil {
		panic("nil")
	}
	var obj types.Object
	e = unparen(e)
	if id, ok := e.(*ast.Ident); ok {
		if isBlankIdent(id) {
			return
		}
		obj = f.Pkg.objectOf(id)
		switch obj.(type) {
		case *types.Nil, *types.Const, *types.Builtin:
			return
		}
	}
	f.emit(&DebugRef{
		X:      v,
		Expr:   e,
		IsAddr: isAddr,
		object: obj,
	})
}
|
||||
|
||||
// emitArith emits to f code to compute the binary operation op(x, y)
// where op is an eager shift, logical or arithmetic operation.
// (Use emitCompare() for comparisons and Builder.logicalBinop() for
// non-eager operations.)
//
// Both operands are converted as required: for shifts only x takes
// the result type t (the shift count keeps its own width); for all
// other operators both operands are converted to t.
func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token.Pos) Value {
	switch op {
	case token.SHL, token.SHR:
		x = emitConv(f, x, t)
		// y may be signed or an 'untyped' constant.
		// TODO(adonovan): whence signed values?
		if b, ok := y.Type().Underlying().(*types.Basic); ok && b.Info()&types.IsUnsigned == 0 {
			y = emitConv(f, y, types.Typ[types.Uint64])
		}

	case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT:
		x = emitConv(f, x, t)
		y = emitConv(f, y, t)

	default:
		panic("illegal op in emitArith: " + op.String())

	}
	v := &BinOp{
		Op: op,
		X:  x,
		Y:  y,
	}
	v.setPos(pos)
	v.setType(t)
	return f.emit(v)
}
|
||||
|
||||
// emitCompare emits to f code to compute the boolean result of
// comparison 'x op y'.  The result value always has type bool.
func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value {
	xt := x.Type().Underlying()
	yt := y.Type().Underlying()

	// Special case to optimise a tagless SwitchStmt so that
	// these are equivalent
	//   switch { case e: ...}
	//   switch true { case e: ... }
	//   if e==true { ... }
	// even in the case when e's type is an interface.
	// TODO(adonovan): opt: generalise to x==true, false!=y, etc.
	if x == vTrue && op == token.EQL {
		if yt, ok := yt.(*types.Basic); ok && yt.Info()&types.IsBoolean != 0 {
			return y
		}
	}

	// Bring the operands to a common type before comparing:
	// convert towards the interface side, or away from the constant side.
	if types.Identical(xt, yt) {
		// no conversion necessary
	} else if _, ok := xt.(*types.Interface); ok {
		y = emitConv(f, y, x.Type())
	} else if _, ok := yt.(*types.Interface); ok {
		x = emitConv(f, x, y.Type())
	} else if _, ok := x.(*Const); ok {
		x = emitConv(f, x, y.Type())
	} else if _, ok := y.(*Const); ok {
		y = emitConv(f, y, x.Type())
	} else {
		// other cases, e.g. channels.  No-op.
	}

	v := &BinOp{
		Op: op,
		X:  x,
		Y:  y,
	}
	v.setPos(pos)
	v.setType(tBool)
	return f.emit(v)
}
|
||||
|
||||
// isValuePreserving returns true if a conversion from ut_src to
|
||||
// ut_dst is value-preserving, i.e. just a change of type.
|
||||
// Precondition: neither argument is a named type.
|
||||
//
|
||||
func isValuePreserving(ut_src, ut_dst types.Type) bool {
|
||||
// Identical underlying types?
|
||||
if structTypesIdentical(ut_dst, ut_src) {
|
||||
return true
|
||||
}
|
||||
|
||||
switch ut_dst.(type) {
|
||||
case *types.Chan:
|
||||
// Conversion between channel types?
|
||||
_, ok := ut_src.(*types.Chan)
|
||||
return ok
|
||||
|
||||
case *types.Pointer:
|
||||
// Conversion between pointers with identical base types?
|
||||
_, ok := ut_src.(*types.Pointer)
|
||||
return ok
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// emitConv emits to f code to convert Value val to exactly type typ,
// and returns the converted value.  Implicit conversions are required
// by language assignability rules in assignments, parameter passing,
// etc.  Conversions cannot fail dynamically.
//
// The cases are tried in order: no-op, pure type change, interface
// construction, constant re-typing, and finally a representation-
// changing Convert instruction; anything else is a builder bug.
func emitConv(f *Function, val Value, typ types.Type) Value {
	t_src := val.Type()

	// Identical types?  Conversion is a no-op.
	if types.Identical(t_src, typ) {
		return val
	}

	ut_dst := typ.Underlying()
	ut_src := t_src.Underlying()

	// Just a change of type, but not value or representation?
	if isValuePreserving(ut_src, ut_dst) {
		c := &ChangeType{X: val}
		c.setType(typ)
		return f.emit(c)
	}

	// Conversion to, or construction of a value of, an interface type?
	if _, ok := ut_dst.(*types.Interface); ok {
		// Assignment from one interface type to another?
		if _, ok := ut_src.(*types.Interface); ok {
			c := &ChangeInterface{X: val}
			c.setType(typ)
			return f.emit(c)
		}

		// Untyped nil constant?  Return interface-typed nil constant.
		if ut_src == tUntypedNil {
			return nilConst(typ)
		}

		// Convert (non-nil) "untyped" literals to their default type.
		if t, ok := ut_src.(*types.Basic); ok && t.Info()&types.IsUntyped != 0 {
			val = emitConv(f, val, DefaultType(ut_src))
		}

		// The dynamic type's method set must be available at runtime.
		f.Pkg.Prog.needMethodsOf(val.Type())
		mi := &MakeInterface{X: val}
		mi.setType(typ)
		return f.emit(mi)
	}

	// Conversion of a compile-time constant value?
	if c, ok := val.(*Const); ok {
		if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() {
			// Conversion of a compile-time constant to
			// another constant type results in a new
			// constant of the destination type and
			// (initially) the same abstract value.
			// We don't truncate the value yet.
			return NewConst(c.Value, typ)
		}

		// We're converting from constant to non-constant type,
		// e.g. string -> []byte/[]rune.
	}

	// A representation-changing conversion?
	// At least one of {ut_src,ut_dst} must be *Basic.
	// (The other may be []byte or []rune.)
	_, ok1 := ut_src.(*types.Basic)
	_, ok2 := ut_dst.(*types.Basic)
	if ok1 || ok2 {
		c := &Convert{X: val}
		c.setType(typ)
		return f.emit(c)
	}

	panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ))
}
|
||||
|
||||
// emitStore emits to f an instruction to store value val at location
|
||||
// addr, applying implicit conversions as required by assignability rules.
|
||||
//
|
||||
func emitStore(f *Function, addr, val Value, pos token.Pos) *Store {
|
||||
s := &Store{
|
||||
Addr: addr,
|
||||
Val: emitConv(f, val, deref(addr.Type())),
|
||||
pos: pos,
|
||||
}
|
||||
f.emit(s)
|
||||
return s
|
||||
}
|
||||
|
||||
// emitJump emits to f a jump to target, and updates the control-flow graph.
|
||||
// Postcondition: f.currentBlock is nil.
|
||||
//
|
||||
func emitJump(f *Function, target *BasicBlock) {
|
||||
b := f.currentBlock
|
||||
b.emit(new(Jump))
|
||||
addEdge(b, target)
|
||||
f.currentBlock = nil
|
||||
}
|
||||
|
||||
// emitIf emits to f a conditional jump to tblock or fblock based on
|
||||
// cond, and updates the control-flow graph.
|
||||
// Postcondition: f.currentBlock is nil.
|
||||
//
|
||||
func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock) {
|
||||
b := f.currentBlock
|
||||
b.emit(&If{Cond: cond})
|
||||
addEdge(b, tblock)
|
||||
addEdge(b, fblock)
|
||||
f.currentBlock = nil
|
||||
}
|
||||
|
||||
// emitExtract emits to f an instruction to extract the index'th
|
||||
// component of tuple. It returns the extracted value.
|
||||
//
|
||||
func emitExtract(f *Function, tuple Value, index int) Value {
|
||||
e := &Extract{Tuple: tuple, Index: index}
|
||||
e.setType(tuple.Type().(*types.Tuple).At(index).Type())
|
||||
return f.emit(e)
|
||||
}
|
||||
|
||||
// emitTypeAssert emits to f a type assertion value := x.(t) and
|
||||
// returns the value. x.Type() must be an interface.
|
||||
//
|
||||
func emitTypeAssert(f *Function, x Value, t types.Type, pos token.Pos) Value {
|
||||
a := &TypeAssert{X: x, AssertedType: t}
|
||||
a.setPos(pos)
|
||||
a.setType(t)
|
||||
return f.emit(a)
|
||||
}
|
||||
|
||||
// emitTypeTest emits to f a type test value,ok := x.(t) and returns
|
||||
// a (value, ok) tuple. x.Type() must be an interface.
|
||||
//
|
||||
func emitTypeTest(f *Function, x Value, t types.Type, pos token.Pos) Value {
|
||||
a := &TypeAssert{
|
||||
X: x,
|
||||
AssertedType: t,
|
||||
CommaOk: true,
|
||||
}
|
||||
a.setPos(pos)
|
||||
a.setType(types.NewTuple(
|
||||
newVar("value", t),
|
||||
varOk,
|
||||
))
|
||||
return f.emit(a)
|
||||
}
|
||||
|
||||
// emitTailCall emits to f a function call in tail position.  The
// caller is responsible for all fields of 'call' except its type.
// Intended for wrapper methods.
// Precondition: f does/will not use deferred procedure calls.
// Postcondition: f.currentBlock is nil.
func emitTailCall(f *Function, call *Call) {
	tresults := f.Signature.Results()
	nr := tresults.Len()
	// A single result takes its own type; multiple results form a tuple.
	if nr == 1 {
		call.typ = tresults.At(0).Type()
	} else {
		call.typ = tresults
	}
	tuple := f.emit(call)
	var ret Return
	switch nr {
	case 0:
		// no-op
	case 1:
		ret.Results = []Value{tuple}
	default:
		// Unpack the tuple into individual result values.
		for i := 0; i < nr; i++ {
			v := emitExtract(f, tuple, i)
			// TODO(adonovan): in principle, this is required:
			//   v = emitConv(f, o.Type, f.Signature.Results[i].Type)
			// but in practice emitTailCall is only used when
			// the types exactly match.
			ret.Results = append(ret.Results, v)
		}
	}
	f.emit(&ret)
	f.currentBlock = nil
}
|
||||
|
||||
// emitImplicitSelections emits to f code to apply the sequence of
// implicit field selections specified by indices to base value v, and
// returns the selected value.
//
// If v is the address of a struct, the result will be the address of
// a field; if it is the value of a struct, the result will be the
// value of a field.
func emitImplicitSelections(f *Function, v Value, indices []int) Value {
	for _, index := range indices {
		fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)

		if isPointer(v.Type()) {
			// Pointer base: select the field's address.
			instr := &FieldAddr{
				X:     v,
				Field: index,
			}
			instr.setType(types.NewPointer(fld.Type()))
			v = f.emit(instr)
			// Load the field's value iff indirectly embedded.
			if isPointer(fld.Type()) {
				v = emitLoad(f, v)
			}
		} else {
			// Value base: select the field's value directly.
			instr := &Field{
				X:     v,
				Field: index,
			}
			instr.setType(fld.Type())
			v = f.emit(instr)
		}
	}
	return v
}
|
||||
|
||||
// emitFieldSelection emits to f code to select the index'th field of v.
//
// If wantAddr, the input must be a pointer-to-struct and the result
// will be the field's address; otherwise the result will be the
// field's value.
// Ident id is used for position and debug info.
func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value {
	fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)
	if isPointer(v.Type()) {
		// Pointer base: take the field's address.
		instr := &FieldAddr{
			X:     v,
			Field: index,
		}
		instr.setPos(id.Pos())
		instr.setType(types.NewPointer(fld.Type()))
		v = f.emit(instr)
		// Load the field's value iff we don't want its address.
		if !wantAddr {
			v = emitLoad(f, v)
		}
	} else {
		// Value base: select the field's value directly.
		instr := &Field{
			X:     v,
			Field: index,
		}
		instr.setPos(id.Pos())
		instr.setType(fld.Type())
		v = f.emit(instr)
	}
	// Associate the selection with id for debug info.
	emitDebugRef(f, id, v, wantAddr)
	return v
}
|
||||
|
||||
// zeroValue emits to f code to produce a zero value of type t,
|
||||
// and returns it.
|
||||
//
|
||||
func zeroValue(f *Function, t types.Type) Value {
|
||||
switch t.Underlying().(type) {
|
||||
case *types.Struct, *types.Array:
|
||||
return emitLoad(f, f.addLocal(t, token.NoPos))
|
||||
default:
|
||||
return zeroConst(t)
|
||||
}
|
||||
}
|
||||
|
||||
// createRecoverBlock emits to f a block of code to return after a
// recovered panic, and sets f.Recover to it.
//
// If f's result parameters are named, the code loads and returns
// their current values, otherwise it returns the zero values of their
// type.
//
// Idempotent.
func createRecoverBlock(f *Function) {
	if f.Recover != nil {
		return // already created
	}
	// Save and restore the insertion point around the synthetic block.
	saved := f.currentBlock

	f.Recover = f.newBasicBlock("recover")
	f.currentBlock = f.Recover

	var results []Value
	if f.namedResults != nil {
		// Reload NRPs to form value tuple.
		for _, r := range f.namedResults {
			results = append(results, emitLoad(f, r))
		}
	} else {
		R := f.Signature.Results()
		for i, n := 0, R.Len(); i < n; i++ {
			T := R.At(i).Type()

			// Return zero value of each result type.
			results = append(results, zeroValue(f, T))
		}
	}
	f.emit(&Return{Results: results})

	f.currentBlock = saved
}
|
||||
691
vendor/golang.org/x/tools/go/ssa/func.go
generated
vendored
Normal file
691
vendor/golang.org/x/tools/go/ssa/func.go
generated
vendored
Normal file
@@ -0,0 +1,691 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file implements the Function and BasicBlock types.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// addEdge adds a control-flow graph edge from from to to.
|
||||
func addEdge(from, to *BasicBlock) {
|
||||
from.Succs = append(from.Succs, to)
|
||||
to.Preds = append(to.Preds, from)
|
||||
}
|
||||
|
||||
// Parent returns the function that contains block b.
func (b *BasicBlock) Parent() *Function { return b.parent }
|
||||
|
||||
// String returns a human-readable label of this block.
// It is not guaranteed unique within the function.
//
// The label is simply the block's decimal index.
func (b *BasicBlock) String() string {
	return fmt.Sprintf("%d", b.Index)
}
|
||||
|
||||
// emit appends an instruction to the current basic block.
|
||||
// If the instruction defines a Value, it is returned.
|
||||
//
|
||||
func (b *BasicBlock) emit(i Instruction) Value {
|
||||
i.setBlock(b)
|
||||
b.Instrs = append(b.Instrs, i)
|
||||
v, _ := i.(Value)
|
||||
return v
|
||||
}
|
||||
|
||||
// predIndex returns the i such that b.Preds[i] == c or panics if
|
||||
// there is none.
|
||||
func (b *BasicBlock) predIndex(c *BasicBlock) int {
|
||||
for i, pred := range b.Preds {
|
||||
if pred == c {
|
||||
return i
|
||||
}
|
||||
}
|
||||
panic(fmt.Sprintf("no edge %s -> %s", c, b))
|
||||
}
|
||||
|
||||
// hasPhi returns true if b.Instrs contains φ-nodes.
// (φ-nodes, if any, always form a prefix of b.Instrs.)
func (b *BasicBlock) hasPhi() bool {
	_, ok := b.Instrs[0].(*Phi)
	return ok
}
|
||||
|
||||
// phis returns the prefix of b.Instrs containing all the block's φ-nodes.
|
||||
func (b *BasicBlock) phis() []Instruction {
|
||||
for i, instr := range b.Instrs {
|
||||
if _, ok := instr.(*Phi); !ok {
|
||||
return b.Instrs[:i]
|
||||
}
|
||||
}
|
||||
return nil // unreachable in well-formed blocks
|
||||
}
|
||||
|
||||
// replacePred replaces all occurrences of p in b's predecessor list with q.
|
||||
// Ordinarily there should be at most one.
|
||||
//
|
||||
func (b *BasicBlock) replacePred(p, q *BasicBlock) {
|
||||
for i, pred := range b.Preds {
|
||||
if pred == p {
|
||||
b.Preds[i] = q
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// replaceSucc replaces all occurrences of p in b's successor list with q.
|
||||
// Ordinarily there should be at most one.
|
||||
//
|
||||
func (b *BasicBlock) replaceSucc(p, q *BasicBlock) {
|
||||
for i, succ := range b.Succs {
|
||||
if succ == p {
|
||||
b.Succs[i] = q
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// removePred removes all occurrences of p in b's
// predecessor list and φ-nodes.
// Ordinarily there should be at most one.
//
// The predecessor list and every φ-node's edge list are compacted in
// parallel so that edge i of each φ continues to correspond to Preds[i].
func (b *BasicBlock) removePred(p *BasicBlock) {
	phis := b.phis()

	// We must preserve edge order for φ-nodes.
	j := 0
	for i, pred := range b.Preds {
		if pred != p {
			b.Preds[j] = b.Preds[i]
			// Strike out φ-edge too.
			for _, instr := range phis {
				phi := instr.(*Phi)
				phi.Edges[j] = phi.Edges[i]
			}
			j++
		}
	}
	// Nil out b.Preds[j:] and φ-edges[j:] to aid GC.
	for i := j; i < len(b.Preds); i++ {
		b.Preds[i] = nil
		for _, instr := range phis {
			instr.(*Phi).Edges[i] = nil
		}
	}
	b.Preds = b.Preds[:j]
	for _, instr := range phis {
		phi := instr.(*Phi)
		phi.Edges = phi.Edges[:j]
	}
}
|
||||
|
||||
// Destinations associated with unlabelled for/switch/select stmts.
// We push/pop one of these as we enter/leave each construct and for
// each BranchStmt we scan for the innermost target of the right type.
type targets struct {
	tail         *targets // rest of stack
	_break       *BasicBlock
	_continue    *BasicBlock
	_fallthrough *BasicBlock
}
|
||||
|
||||
// Destinations associated with a labelled block.
// We populate these as labels are encountered in forward gotos or
// labelled statements.
type lblock struct {
	_goto     *BasicBlock
	_break    *BasicBlock
	_continue *BasicBlock
}
|
||||
|
||||
// labelledBlock returns the branch target associated with the
// specified label, creating it if needed.
//
// The f.lblocks map is allocated lazily on first use.
func (f *Function) labelledBlock(label *ast.Ident) *lblock {
	lb := f.lblocks[label.Obj]
	if lb == nil {
		lb = &lblock{_goto: f.newBasicBlock(label.Name)}
		if f.lblocks == nil {
			f.lblocks = make(map[*ast.Object]*lblock)
		}
		f.lblocks[label.Obj] = lb
	}
	return lb
}
|
||||
|
||||
// addParam adds a (non-escaping) parameter to f.Params of the
|
||||
// specified name, type and source position.
|
||||
//
|
||||
func (f *Function) addParam(name string, typ types.Type, pos token.Pos) *Parameter {
|
||||
v := &Parameter{
|
||||
name: name,
|
||||
typ: typ,
|
||||
pos: pos,
|
||||
parent: f,
|
||||
}
|
||||
f.Params = append(f.Params, v)
|
||||
return v
|
||||
}
|
||||
|
||||
func (f *Function) addParamObj(obj types.Object) *Parameter {
|
||||
name := obj.Name()
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("arg%d", len(f.Params))
|
||||
}
|
||||
param := f.addParam(name, obj.Type(), obj.Pos())
|
||||
param.object = obj
|
||||
return param
|
||||
}
|
||||
|
||||
// addSpilledParam declares a parameter that is pre-spilled to the
|
||||
// stack; the function body will load/store the spilled location.
|
||||
// Subsequent lifting will eliminate spills where possible.
|
||||
//
|
||||
func (f *Function) addSpilledParam(obj types.Object) {
|
||||
param := f.addParamObj(obj)
|
||||
spill := &Alloc{Comment: obj.Name()}
|
||||
spill.setType(types.NewPointer(obj.Type()))
|
||||
spill.setPos(obj.Pos())
|
||||
f.objects[obj] = spill
|
||||
f.Locals = append(f.Locals, spill)
|
||||
f.emit(spill)
|
||||
f.emit(&Store{Addr: spill, Val: param})
|
||||
}
|
||||
|
||||
// startBody initializes the function prior to generating SSA code for its body.
// Precondition: f.Type() already set.
//
// The fresh "entry" block becomes the current target for emitted instructions.
func (f *Function) startBody() {
	f.currentBlock = f.newBasicBlock("entry")
	f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init
}
|
||||
|
||||
// createSyntacticParams populates f.Params and generates code (spills
|
||||
// and named result locals) for all the parameters declared in the
|
||||
// syntax. In addition it populates the f.objects mapping.
|
||||
//
|
||||
// Preconditions:
|
||||
// f.startBody() was called.
|
||||
// Postcondition:
|
||||
// len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0)
|
||||
//
|
||||
func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) {
|
||||
// Receiver (at most one inner iteration).
|
||||
if recv != nil {
|
||||
for _, field := range recv.List {
|
||||
for _, n := range field.Names {
|
||||
f.addSpilledParam(f.Pkg.info.Defs[n])
|
||||
}
|
||||
// Anonymous receiver? No need to spill.
|
||||
if field.Names == nil {
|
||||
f.addParamObj(f.Signature.Recv())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parameters.
|
||||
if functype.Params != nil {
|
||||
n := len(f.Params) // 1 if has recv, 0 otherwise
|
||||
for _, field := range functype.Params.List {
|
||||
for _, n := range field.Names {
|
||||
f.addSpilledParam(f.Pkg.info.Defs[n])
|
||||
}
|
||||
// Anonymous parameter? No need to spill.
|
||||
if field.Names == nil {
|
||||
f.addParamObj(f.Signature.Params().At(len(f.Params) - n))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Named results.
|
||||
if functype.Results != nil {
|
||||
for _, field := range functype.Results.List {
|
||||
// Implicit "var" decl of locals for named results.
|
||||
for _, n := range field.Names {
|
||||
f.namedResults = append(f.namedResults, f.addLocalForIdent(n))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setNumable is satisfied by register-like Values whose sequence
// number can be assigned (see numberRegisters).
type setNumable interface {
	setNum(int)
}
|
||||
|
||||
// numberRegisters assigns numbers to all SSA registers
|
||||
// (value-defining Instructions) in f, to aid debugging.
|
||||
// (Non-Instruction Values are named at construction.)
|
||||
//
|
||||
func numberRegisters(f *Function) {
|
||||
v := 0
|
||||
for _, b := range f.Blocks {
|
||||
for _, instr := range b.Instrs {
|
||||
switch instr.(type) {
|
||||
case Value:
|
||||
instr.(setNumable).setNum(v)
|
||||
v++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// buildReferrers populates the def/use information in all non-nil
|
||||
// Value.Referrers slice.
|
||||
// Precondition: all such slices are initially empty.
|
||||
func buildReferrers(f *Function) {
|
||||
var rands []*Value
|
||||
for _, b := range f.Blocks {
|
||||
for _, instr := range b.Instrs {
|
||||
rands = instr.Operands(rands[:0]) // recycle storage
|
||||
for _, rand := range rands {
|
||||
if r := *rand; r != nil {
|
||||
if ref := r.Referrers(); ref != nil {
|
||||
*ref = append(*ref, instr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// finishBody() finalizes the function after SSA code generation of its body.
//
// The pass order matters: lifting requires up-to-date referrers and
// dominator-tree information, so buildReferrers and buildDomTree run
// before lift; register numbering runs last so lifted code gets dense
// numbers.
func (f *Function) finishBody() {
	// Discard build-time state.
	f.objects = nil
	f.currentBlock = nil
	f.lblocks = nil

	// Don't pin the AST in memory (except in debug mode):
	// keep only the source extent of the declaration.
	if n := f.syntax; n != nil && !f.debugInfo() {
		f.syntax = extentNode{n.Pos(), n.End()}
	}

	// Remove from f.Locals any Allocs that escape to the heap.
	j := 0
	for _, l := range f.Locals {
		if !l.Heap {
			f.Locals[j] = l
			j++
		}
	}
	// Nil out f.Locals[j:] to aid GC.
	for i := j; i < len(f.Locals); i++ {
		f.Locals[i] = nil
	}
	f.Locals = f.Locals[:j]

	optimizeBlocks(f)

	buildReferrers(f)

	buildDomTree(f)

	if f.Prog.mode&NaiveForm == 0 {
		// For debugging pre-state of lifting pass:
		// numberRegisters(f)
		// f.WriteTo(os.Stderr)
		lift(f)
	}

	f.namedResults = nil // (used by lifting)

	numberRegisters(f)

	if f.Prog.mode&PrintFunctions != 0 {
		// printMu serializes whole-function dumps from concurrent builds.
		printMu.Lock()
		f.WriteTo(os.Stdout)
		printMu.Unlock()
	}

	if f.Prog.mode&SanityCheckFunctions != 0 {
		mustSanityCheck(f, nil)
	}
}
|
||||
|
||||
// removeNilBlocks eliminates nils from f.Blocks and updates each
|
||||
// BasicBlock.Index. Use this after any pass that may delete blocks.
|
||||
//
|
||||
func (f *Function) removeNilBlocks() {
|
||||
j := 0
|
||||
for _, b := range f.Blocks {
|
||||
if b != nil {
|
||||
b.Index = j
|
||||
f.Blocks[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
// Nil out f.Blocks[j:] to aid GC.
|
||||
for i := j; i < len(f.Blocks); i++ {
|
||||
f.Blocks[i] = nil
|
||||
}
|
||||
f.Blocks = f.Blocks[:j]
|
||||
}
|
||||
|
||||
// SetDebugMode sets the debug mode for package pkg.  If true, all its
// functions will include full debug info.  This greatly increases the
// size of the instruction stream, and causes Functions to depend upon
// the ASTs, potentially keeping them live in memory for longer.
//
func (pkg *Package) SetDebugMode(debug bool) {
	// TODO(adonovan): do we want ast.File granularity?
	pkg.debug = debug // consulted by Function.debugInfo
}
|
||||
|
||||
// debugInfo reports whether debug info is wanted for this function.
|
||||
func (f *Function) debugInfo() bool {
|
||||
return f.Pkg != nil && f.Pkg.debug
|
||||
}
|
||||
|
||||
// addNamedLocal creates a local variable, adds it to function f and
|
||||
// returns it. Its name and type are taken from obj. Subsequent
|
||||
// calls to f.lookup(obj) will return the same local.
|
||||
//
|
||||
func (f *Function) addNamedLocal(obj types.Object) *Alloc {
|
||||
l := f.addLocal(obj.Type(), obj.Pos())
|
||||
l.Comment = obj.Name()
|
||||
f.objects[obj] = l
|
||||
return l
|
||||
}
|
||||
|
||||
// addLocalForIdent declares a named local for the object that the
// identifier id defines, per the package's type information.
func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc {
	return f.addNamedLocal(f.Pkg.info.Defs[id])
}
|
||||
|
||||
// addLocal creates an anonymous local variable of type typ, adds it
|
||||
// to function f and returns it. pos is the optional source location.
|
||||
//
|
||||
func (f *Function) addLocal(typ types.Type, pos token.Pos) *Alloc {
|
||||
v := &Alloc{}
|
||||
v.setType(types.NewPointer(typ))
|
||||
v.setPos(pos)
|
||||
f.Locals = append(f.Locals, v)
|
||||
f.emit(v)
|
||||
return v
|
||||
}
|
||||
|
||||
// lookup returns the address of the named variable identified by obj
// that is local to function f or one of its enclosing functions.
// If escaping, the reference comes from a potentially escaping pointer
// expression and the referent must be heap-allocated.
//
// For variables defined in an enclosing function, a FreeVar is created
// (and memoized in f.objects) in every intervening closure, and the
// enclosing definition is forced to escape.
func (f *Function) lookup(obj types.Object, escaping bool) Value {
	if v, ok := f.objects[obj]; ok {
		if alloc, ok := v.(*Alloc); ok && escaping {
			alloc.Heap = true
		}
		return v // function-local var (address)
	}

	// Definition must be in an enclosing function;
	// plumb it through intervening closures.
	if f.parent == nil {
		panic("no ssa.Value for " + obj.String())
	}
	outer := f.parent.lookup(obj, true) // escaping
	v := &FreeVar{
		name:   obj.Name(),
		typ:    outer.Type(),
		pos:    outer.Pos(),
		outer:  outer,
		parent: f,
	}
	f.objects[obj] = v
	f.FreeVars = append(f.FreeVars, v)
	return v
}
|
||||
|
||||
// emit emits the specified instruction to function f by delegating
// to the current basic block, returning the instruction as a Value.
func (f *Function) emit(instr Instruction) Value {
	return f.currentBlock.emit(instr)
}
|
||||
|
||||
// RelString returns the full name of this function, qualified by
// package name, receiver type, etc.
//
// The specific formatting rules are not guaranteed and may change.
//
// Examples:
//      "math.IsNaN"                  // a package-level function
//      "(*bytes.Buffer).Bytes"       // a declared method or a wrapper
//      "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0)
//      "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure)
//      "main.main$1"                 // an anonymous function in main
//      "main.init#1"                 // a declared init function
//      "main.init"                   // the synthesized package initializer
//
// When these functions are referred to from within the same package
// (i.e. from == f.Pkg.Object), they are rendered without the package path.
// For example: "IsNaN", "(*Buffer).Bytes", etc.
//
// All non-synthetic functions have distinct package-qualified names.
// (But two methods may have the same name "(T).f" if one is a synthetic
// wrapper promoting a non-exported method "f" from another package; in
// that case, the strings are equal but the identifiers "f" are distinct.)
//
func (f *Function) RelString(from *types.Package) string {
	// Anonymous?
	if f.parent != nil {
		// An anonymous function's Name() looks like "parentName$1",
		// but its String() should include the type/package/etc.
		parent := f.parent.RelString(from)
		// Derive the ordinal from the parent's AnonFuncs list (1-based).
		for i, anon := range f.parent.AnonFuncs {
			if anon == f {
				return fmt.Sprintf("%s$%d", parent, 1+i)
			}
		}

		return f.name // should never happen
	}

	// Method (declared or wrapper)?
	if recv := f.Signature.Recv(); recv != nil {
		return f.relMethod(from, recv.Type())
	}

	// Thunk?
	if f.method != nil {
		return f.relMethod(from, f.method.Recv())
	}

	// Bound?
	if len(f.FreeVars) == 1 && strings.HasSuffix(f.name, "$bound") {
		return f.relMethod(from, f.FreeVars[0].Type())
	}

	// Package-level function?
	// Prefix with package name for cross-package references only.
	if p := f.pkg(); p != nil && p != from {
		return fmt.Sprintf("%s.%s", p.Path(), f.name)
	}

	// Unknown.
	return f.name
}
|
||||
|
||||
func (f *Function) relMethod(from *types.Package, recv types.Type) string {
|
||||
return fmt.Sprintf("(%s).%s", relType(recv, from), f.name)
|
||||
}
|
||||
|
||||
// writeSignature writes to buf the signature sig in declaration syntax.
|
||||
func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature, params []*Parameter) {
|
||||
buf.WriteString("func ")
|
||||
if recv := sig.Recv(); recv != nil {
|
||||
buf.WriteString("(")
|
||||
if n := params[0].Name(); n != "" {
|
||||
buf.WriteString(n)
|
||||
buf.WriteString(" ")
|
||||
}
|
||||
types.WriteType(buf, params[0].Type(), types.RelativeTo(from))
|
||||
buf.WriteString(") ")
|
||||
}
|
||||
buf.WriteString(name)
|
||||
types.WriteSignature(buf, sig, types.RelativeTo(from))
|
||||
}
|
||||
|
||||
func (f *Function) pkg() *types.Package {
|
||||
if f.Pkg != nil {
|
||||
return f.Pkg.Pkg
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ io.WriterTo = (*Function)(nil) // *Function implements io.Writer
|
||||
|
||||
func (f *Function) WriteTo(w io.Writer) (int64, error) {
|
||||
var buf bytes.Buffer
|
||||
WriteFunction(&buf, f)
|
||||
n, err := w.Write(buf.Bytes())
|
||||
return int64(n), err
|
||||
}
|
||||
|
||||
// WriteFunction writes to buf a human-readable "disassembly" of f:
// a "#"-prefixed header (name, package, location, free variables,
// locals, ...), the signature, then each basic block with its
// instructions, one per line, with types right-aligned where space
// permits.
func WriteFunction(buf *bytes.Buffer, f *Function) {
	fmt.Fprintf(buf, "# Name: %s\n", f.String())
	if f.Pkg != nil {
		fmt.Fprintf(buf, "# Package: %s\n", f.Pkg.Pkg.Path())
	}
	if syn := f.Synthetic; syn != "" {
		fmt.Fprintln(buf, "# Synthetic:", syn)
	}
	if pos := f.Pos(); pos.IsValid() {
		fmt.Fprintf(buf, "# Location: %s\n", f.Prog.Fset.Position(pos))
	}

	if f.parent != nil {
		fmt.Fprintf(buf, "# Parent: %s\n", f.parent.Name())
	}

	if f.Recover != nil {
		fmt.Fprintf(buf, "# Recover: %s\n", f.Recover)
	}

	from := f.pkg()

	if f.FreeVars != nil {
		buf.WriteString("# Free variables:\n")
		for i, fv := range f.FreeVars {
			fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, fv.Name(), relType(fv.Type(), from))
		}
	}

	if len(f.Locals) > 0 {
		buf.WriteString("# Locals:\n")
		for i, l := range f.Locals {
			fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(deref(l.Type()), from))
		}
	}
	writeSignature(buf, from, f.Name(), f.Signature, f.Params)
	buf.WriteString(":\n")

	if f.Blocks == nil {
		buf.WriteString("\t(external)\n")
	}

	// NB. column calculations are confused by non-ASCII
	// characters and assume 8-space tabs.
	const punchcard = 80 // for old time's sake.
	const tabwidth = 8
	for _, b := range f.Blocks {
		if b == nil {
			// Corrupt CFG.
			fmt.Fprintf(buf, ".nil:\n")
			continue
		}
		// Block header: index left, comment/pred/succ counts right-aligned.
		n, _ := fmt.Fprintf(buf, "%d:", b.Index)
		bmsg := fmt.Sprintf("%s P:%d S:%d", b.Comment, len(b.Preds), len(b.Succs))
		fmt.Fprintf(buf, "%*s%s\n", punchcard-1-n-len(bmsg), "", bmsg)

		if false { // CFG debugging
			fmt.Fprintf(buf, "\t# CFG: %s --> %s --> %s\n", b.Preds, b, b.Succs)
		}
		for _, instr := range b.Instrs {
			buf.WriteString("\t")
			switch v := instr.(type) {
			case Value:
				// l tracks the remaining width budget for the line.
				l := punchcard - tabwidth
				// Left-align the instruction.
				if name := v.Name(); name != "" {
					n, _ := fmt.Fprintf(buf, "%s = ", name)
					l -= n
				}
				n, _ := buf.WriteString(instr.String())
				l -= n
				// Right-align the type if there's space.
				if t := v.Type(); t != nil {
					buf.WriteByte(' ')
					ts := relType(t, from)
					l -= len(ts) + len("  ") // (spaces before and after type)
					if l > 0 {
						fmt.Fprintf(buf, "%*s", l, "")
					}
					buf.WriteString(ts)
				}
			case nil:
				// Be robust against bad transforms.
				buf.WriteString("<deleted>")
			default:
				buf.WriteString(instr.String())
			}
			buf.WriteString("\n")
		}
	}
	fmt.Fprintf(buf, "\n")
}
|
||||
|
||||
// newBasicBlock adds to f a new basic block and returns it. It does
|
||||
// not automatically become the current block for subsequent calls to emit.
|
||||
// comment is an optional string for more readable debugging output.
|
||||
//
|
||||
func (f *Function) newBasicBlock(comment string) *BasicBlock {
|
||||
b := &BasicBlock{
|
||||
Index: len(f.Blocks),
|
||||
Comment: comment,
|
||||
parent: f,
|
||||
}
|
||||
b.Succs = b.succs2[:0]
|
||||
f.Blocks = append(f.Blocks, b)
|
||||
return b
|
||||
}
|
||||
|
||||
// NewFunction returns a new synthetic Function instance belonging to
// prog, with its name and signature fields set as specified.
//
// The caller is responsible for initializing the remaining fields of
// the function object, e.g. Pkg, Params, Blocks.
//
// It is practically impossible for clients to construct well-formed
// SSA functions/packages/programs directly, so we assume this is the
// job of the Builder alone.  NewFunction exists to provide clients a
// little flexibility.  For example, analysis tools may wish to
// construct fake Functions for the root of the callgraph, a fake
// "reflect" package, etc.
//
// TODO(adonovan): think harder about the API here.
//
func (prog *Program) NewFunction(name string, sig *types.Signature, provenance string) *Function {
	// provenance becomes Synthetic, marking the function as compiler/
	// tool-generated rather than derived from source syntax.
	return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance}
}
|
||||
|
||||
type extentNode [2]token.Pos
|
||||
|
||||
func (n extentNode) Pos() token.Pos { return n[0] }
|
||||
func (n extentNode) End() token.Pos { return n[1] }
|
||||
|
||||
// Syntax returns an ast.Node whose Pos/End methods provide the
// lexical extent of the function if it was defined by Go source code
// (f.Synthetic==""), or nil otherwise.
//
// If f was built with debug information (see Package.SetDebugRef),
// the result is the *ast.FuncDecl or *ast.FuncLit that declared the
// function.  Otherwise, it is an opaque Node providing only position
// information; this avoids pinning the AST in memory.
// (finishBody replaces f.syntax with a lightweight extentNode when
// debug info is off.)
//
func (f *Function) Syntax() ast.Node { return f.syntax }
||||
7
vendor/golang.org/x/tools/go/ssa/identical.go
generated
vendored
Normal file
7
vendor/golang.org/x/tools/go/ssa/identical.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
// +build go1.8
|
||||
|
||||
package ssa
|
||||
|
||||
import "go/types"
|
||||
|
||||
// structTypesIdentical reports whether two types are identical;
// on go1.8+ (per this file's build tag) struct tags are ignored.
var structTypesIdentical = types.IdenticalIgnoreTags
|
||||
7
vendor/golang.org/x/tools/go/ssa/identical_17.go
generated
vendored
Normal file
7
vendor/golang.org/x/tools/go/ssa/identical_17.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
// +build !go1.8
|
||||
|
||||
package ssa
|
||||
|
||||
import "go/types"
|
||||
|
||||
// structTypesIdentical reports whether two types are identical;
// pre-go1.8 fallback (per this file's build tag): IdenticalIgnoreTags
// is unavailable, so plain types.Identical is used.
var structTypesIdentical = types.Identical
|
||||
653
vendor/golang.org/x/tools/go/ssa/lift.go
generated
vendored
Normal file
653
vendor/golang.org/x/tools/go/ssa/lift.go
generated
vendored
Normal file
@@ -0,0 +1,653 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines the lifting pass which tries to "lift" Alloc
|
||||
// cells (new/local variables) into SSA registers, replacing loads
|
||||
// with the dominating stored value, eliminating loads and stores, and
|
||||
// inserting φ-nodes as needed.
|
||||
|
||||
// Cited papers and resources:
|
||||
//
|
||||
// Ron Cytron et al. 1991. Efficiently computing SSA form...
|
||||
// http://doi.acm.org/10.1145/115372.115320
|
||||
//
|
||||
// Cooper, Harvey, Kennedy. 2001. A Simple, Fast Dominance Algorithm.
|
||||
// Software Practice and Experience 2001, 4:1-10.
|
||||
// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
|
||||
//
|
||||
// Daniel Berlin, llvmdev mailing list, 2012.
|
||||
// http://lists.cs.uiuc.edu/pipermail/llvmdev/2012-January/046638.html
|
||||
// (Be sure to expand the whole thread.)
|
||||
|
||||
// TODO(adonovan): opt: there are many optimizations worth evaluating, and
|
||||
// the conventional wisdom for SSA construction is that a simple
|
||||
// algorithm well engineered often beats those of better asymptotic
|
||||
// complexity on all but the most egregious inputs.
|
||||
//
|
||||
// Danny Berlin suggests that the Cooper et al. algorithm for
|
||||
// computing the dominance frontier is superior to Cytron et al.
|
||||
// Furthermore he recommends that rather than computing the DF for the
|
||||
// whole function then renaming all alloc cells, it may be cheaper to
|
||||
// compute the DF for each alloc cell separately and throw it away.
|
||||
//
|
||||
// Consider exploiting liveness information to avoid creating dead
|
||||
// φ-nodes which we then immediately remove.
|
||||
//
|
||||
// Also see many other "TODO: opt" suggestions in the code.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"math/big"
|
||||
"os"
|
||||
)
|
||||
|
||||
// If true, show diagnostic information at each step of lifting.
|
||||
// Very verbose.
|
||||
const debugLifting = false
|
||||
|
||||
// domFrontier maps each block to the set of blocks in its dominance
// frontier.  The outer slice is conceptually a map keyed by
// Block.Index.  The inner slice is conceptually a set, possibly
// containing duplicates.
//
// TODO(adonovan): opt: measure impact of dups; consider a packed bit
// representation, e.g. big.Int, and bitwise parallel operations for
// the union step in the Children loop.
//
// domFrontier's methods mutate the slice's elements but not its
// length, so their receivers needn't be pointers.
//
type domFrontier [][]*BasicBlock
|
||||
|
||||
func (df domFrontier) add(u, v *BasicBlock) {
|
||||
p := &df[u.Index]
|
||||
*p = append(*p, v)
|
||||
}
|
||||
|
||||
// build builds the dominance frontier df for the dominator (sub)tree
// rooted at u, using the Cytron et al. algorithm.
//
// TODO(adonovan): opt: consider Berlin approach, computing pruned SSA
// by pruning the entire IDF computation, rather than merely pruning
// the DF -> IDF step.
func (df domFrontier) build(u *BasicBlock) {
	// Encounter each node u in postorder of dom tree.
	for _, child := range u.dom.children {
		df.build(child)
	}
	// DF-local: successors of u not immediately dominated by u.
	for _, vb := range u.Succs {
		if v := vb.dom; v.idom != u {
			df.add(u, vb)
		}
	}
	// DF-up: frontier members of u's children not immediately dominated by u.
	for _, w := range u.dom.children {
		for _, vb := range df[w.Index] {
			// TODO(adonovan): opt: use word-parallel bitwise union.
			if v := vb.dom; v.idom != u {
				df.add(u, vb)
			}
		}
	}
}
|
||||
|
||||
func buildDomFrontier(fn *Function) domFrontier {
|
||||
df := make(domFrontier, len(fn.Blocks))
|
||||
df.build(fn.Blocks[0])
|
||||
if fn.Recover != nil {
|
||||
df.build(fn.Recover)
|
||||
}
|
||||
return df
|
||||
}
|
||||
|
||||
func removeInstr(refs []Instruction, instr Instruction) []Instruction {
|
||||
i := 0
|
||||
for _, ref := range refs {
|
||||
if ref == instr {
|
||||
continue
|
||||
}
|
||||
refs[i] = ref
|
||||
i++
|
||||
}
|
||||
for j := i; j != len(refs); j++ {
|
||||
refs[j] = nil // aid GC
|
||||
}
|
||||
return refs[:i]
|
||||
}
|
||||
|
||||
// lift replaces local and new Allocs accessed only with
// load/store by SSA registers, inserting φ-nodes where necessary.
// The result is a program in classical pruned SSA form.
//
// Preconditions:
// - fn has no dead blocks (blockopt has run).
// - Def/use info (Operands and Referrers) is up-to-date.
// - The dominator tree is up-to-date.
//
func lift(fn *Function) {
	// TODO(adonovan): opt: lots of little optimizations may be
	// worthwhile here, especially if they cause us to avoid
	// buildDomFrontier.  For example:
	//
	// - Alloc never loaded?  Eliminate.
	// - Alloc never stored?  Replace all loads with a zero constant.
	// - Alloc stored once?  Replace loads with dominating store;
	//   don't forget that an Alloc is itself an effective store
	//   of zero.
	// - Alloc used only within a single block?
	//   Use degenerate algorithm avoiding φ-nodes.
	// - Consider synergy with scalar replacement of aggregates (SRA).
	//   e.g. *(&x.f) where x is an Alloc.
	//   Perhaps we'd get better results if we generated this as x.f
	//   i.e. Field(x, .f) instead of Load(FieldIndex(x, .f)).
	//   Unclear.
	//
	// But we will start with the simplest correct code.
	df := buildDomFrontier(fn)

	if debugLifting {
		title := false
		for i, blocks := range df {
			if blocks != nil {
				if !title {
					fmt.Fprintf(os.Stderr, "Dominance frontier of %s:\n", fn)
					title = true
				}
				fmt.Fprintf(os.Stderr, "\t%s: %s\n", fn.Blocks[i], blocks)
			}
		}
	}

	newPhis := make(newPhiMap)

	// During this pass we will replace some BasicBlock.Instrs
	// (allocs, loads and stores) with nil, keeping a count in
	// BasicBlock.gaps.  At the end we will reset Instrs to the
	// concatenation of all non-dead newPhis and non-nil Instrs
	// for the block, reusing the original array if space permits.

	// While we're here, we also eliminate 'rundefers'
	// instructions in functions that contain no 'defer'
	// instructions.
	usesDefer := false

	// A counter used to generate ~unique ids for Phi nodes, as an
	// aid to debugging.  We use large numbers to make them highly
	// visible.  All nodes are renumbered later.
	fresh := 1000

	// Determine which allocs we can lift and number them densely.
	// The renaming phase uses this numbering for compact maps.
	numAllocs := 0
	for _, b := range fn.Blocks {
		b.gaps = 0
		b.rundefers = 0
		for _, instr := range b.Instrs {
			switch instr := instr.(type) {
			case *Alloc:
				// index == -1 marks an unliftable alloc.
				index := -1
				if liftAlloc(df, instr, newPhis, &fresh) {
					index = numAllocs
					numAllocs++
				}
				instr.index = index
			case *Defer:
				usesDefer = true
			case *RunDefers:
				b.rundefers++
			}
		}
	}

	// renaming maps an alloc (keyed by index) to its replacement
	// value.  Initially the renaming contains nil, signifying the
	// zero constant of the appropriate type; we construct the
	// Const lazily at most once on each path through the domtree.
	// TODO(adonovan): opt: cache per-function not per subtree.
	renaming := make([]Value, numAllocs)

	// Renaming.
	rename(fn.Blocks[0], renaming, newPhis)

	// Eliminate dead φ-nodes.
	removeDeadPhis(fn.Blocks, newPhis)

	// Prepend remaining live φ-nodes to each block.
	for _, b := range fn.Blocks {
		nps := newPhis[b]
		j := len(nps)

		rundefersToKill := b.rundefers
		if usesDefer {
			rundefersToKill = 0
		}

		if j+b.gaps+rundefersToKill == 0 {
			continue // fast path: no new phis or gaps
		}

		// Compact nps + non-nil Instrs into a new slice.
		// TODO(adonovan): opt: compact in situ (rightwards)
		// if Instrs has sufficient space or slack.
		dst := make([]Instruction, len(b.Instrs)+j-b.gaps-rundefersToKill)
		for i, np := range nps {
			dst[i] = np.phi
		}
		for _, instr := range b.Instrs {
			if instr == nil {
				continue
			}
			if !usesDefer {
				if _, ok := instr.(*RunDefers); ok {
					continue
				}
			}
			dst[j] = instr
			j++
		}
		b.Instrs = dst
	}

	// Remove any fn.Locals that were lifted (index >= 0).
	j := 0
	for _, l := range fn.Locals {
		if l.index < 0 {
			fn.Locals[j] = l
			j++
		}
	}
	// Nil out fn.Locals[j:] to aid GC.
	for i := j; i < len(fn.Locals); i++ {
		fn.Locals[i] = nil
	}
	fn.Locals = fn.Locals[:j]
}
|
||||
|
||||
// removeDeadPhis removes φ-nodes not transitively needed by a
// non-Phi, non-DebugRef instruction.
func removeDeadPhis(blocks []*BasicBlock, newPhis newPhiMap) {
	// First pass: find the set of "live" φ-nodes: those reachable
	// from some non-Phi instruction.
	//
	// We compute reachability in reverse, starting from each φ,
	// rather than forwards, starting from each live non-Phi
	// instruction, because this way visits much less of the
	// Value graph.
	livePhis := make(map[*Phi]bool)
	for _, npList := range newPhis {
		for _, np := range npList {
			phi := np.phi
			if !livePhis[phi] && phiHasDirectReferrer(phi) {
				markLivePhi(livePhis, phi)
			}
		}
	}

	// Existing φ-nodes due to && and || operators
	// are all considered live (see Go issue 19622).
	for _, b := range blocks {
		for _, phi := range b.phis() {
			markLivePhi(livePhis, phi.(*Phi))
		}
	}

	// Second pass: eliminate unused phis from newPhis,
	// compacting each list in place.
	for block, npList := range newPhis {
		j := 0
		for _, np := range npList {
			if livePhis[np.phi] {
				npList[j] = np
				j++
			} else {
				// discard it, first removing it from referrers
				for _, val := range np.phi.Edges {
					if refs := val.Referrers(); refs != nil {
						*refs = removeInstr(*refs, np.phi)
					}
				}
				// Detach the dead φ from its block.
				np.phi.block = nil
			}
		}
		newPhis[block] = npList[:j]
	}
}
|
||||
|
||||
// markLivePhi marks phi, and all φ-nodes transitively reachable via
|
||||
// its Operands, live.
|
||||
func markLivePhi(livePhis map[*Phi]bool, phi *Phi) {
|
||||
livePhis[phi] = true
|
||||
for _, rand := range phi.Operands(nil) {
|
||||
if q, ok := (*rand).(*Phi); ok {
|
||||
if !livePhis[q] {
|
||||
markLivePhi(livePhis, q)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// phiHasDirectReferrer reports whether phi is directly referred to by
|
||||
// a non-Phi instruction. Such instructions are the
|
||||
// roots of the liveness traversal.
|
||||
func phiHasDirectReferrer(phi *Phi) bool {
|
||||
for _, instr := range *phi.Referrers() {
|
||||
if _, ok := instr.(*Phi); !ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// blockSet is a set of BasicBlocks, represented as a bitmap indexed
// by BasicBlock.Index.
type blockSet struct{ big.Int } // (inherit methods from Int)
|
||||
|
||||
// add adds b to the set and returns true if the set changed.
|
||||
func (s *blockSet) add(b *BasicBlock) bool {
|
||||
i := b.Index
|
||||
if s.Bit(i) != 0 {
|
||||
return false
|
||||
}
|
||||
s.SetBit(&s.Int, i, 1)
|
||||
return true
|
||||
}
|
||||
|
||||
// take removes an arbitrary element from a set s and
|
||||
// returns its index, or returns -1 if empty.
|
||||
func (s *blockSet) take() int {
|
||||
l := s.BitLen()
|
||||
for i := 0; i < l; i++ {
|
||||
if s.Bit(i) == 1 {
|
||||
s.SetBit(&s.Int, i, 0)
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// newPhi is a pair of a newly introduced φ-node and the lifted Alloc
// it replaces.
type newPhi struct {
	phi   *Phi   // the newly inserted φ-node
	alloc *Alloc // the lifted Alloc it replaces
}
|
||||
|
||||
// newPhiMap records for each basic block, the set of newPhis that
// must be prepended to the block.
// (lift performs the actual prepending in a single compaction pass.)
type newPhiMap map[*BasicBlock][]newPhi
|
||||
|
||||
// liftAlloc determines whether alloc can be lifted into registers,
// and if so, it populates newPhis with all the φ-nodes it may require
// and returns true.
//
// fresh is a source of fresh ids for phi nodes.
//
func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool {
	// Don't lift aggregates into registers, because we don't have
	// a way to express their zero-constants.
	switch deref(alloc.Type()).Underlying().(type) {
	case *types.Array, *types.Struct:
		return false
	}

	// Don't lift named return values in functions that defer
	// calls that may recover from panic.
	if fn := alloc.Parent(); fn.Recover != nil {
		for _, nr := range fn.namedResults {
			if nr == alloc {
				return false
			}
		}
	}

	// Compute defblocks, the set of blocks containing a
	// definition of the alloc cell.
	var defblocks blockSet
	for _, instr := range *alloc.Referrers() {
		// Bail out if we discover the alloc is not liftable;
		// the only operations permitted to use the alloc are
		// loads/stores into the cell, and DebugRef.
		switch instr := instr.(type) {
		case *Store:
			if instr.Val == alloc {
				return false // address used as value
			}
			if instr.Addr != alloc {
				panic("Alloc.Referrers is inconsistent")
			}
			defblocks.add(instr.Block())
		case *UnOp:
			// Only a load (*x) of the cell is permitted.
			if instr.Op != token.MUL {
				return false // not a load
			}
			if instr.X != alloc {
				panic("Alloc.Referrers is inconsistent")
			}
		case *DebugRef:
			// ok
		default:
			return false // some other instruction
		}
	}
	// The Alloc itself counts as a (zero) definition of the cell.
	defblocks.add(alloc.Block())

	if debugLifting {
		fmt.Fprintln(os.Stderr, "\tlifting ", alloc, alloc.Name())
	}

	fn := alloc.Parent()

	// Φ-insertion.
	//
	// What follows is the body of the main loop of the insert-φ
	// function described by Cytron et al, but instead of using
	// counter tricks, we just reset the 'hasAlready' and 'work'
	// sets each iteration.  These are bitmaps so it's pretty cheap.
	//
	// TODO(adonovan): opt: recycle slice storage for W,
	// hasAlready, defBlocks across liftAlloc calls.
	var hasAlready blockSet

	// Initialize W and work to defblocks.
	var work blockSet = defblocks // blocks seen
	var W blockSet                // blocks to do
	W.Set(&defblocks.Int)

	// Traverse iterated dominance frontier, inserting φ-nodes.
	for i := W.take(); i != -1; i = W.take() {
		u := fn.Blocks[i]
		for _, v := range df[u.Index] {
			// hasAlready.add returns true only the first time v
			// is seen, so each block receives at most one φ-node
			// for this alloc.
			if hasAlready.add(v) {
				// Create φ-node.
				// It will be prepended to v.Instrs later, if needed.
				phi := &Phi{
					Edges:   make([]Value, len(v.Preds)),
					Comment: alloc.Comment,
				}
				// This is merely a debugging aid:
				phi.setNum(*fresh)
				*fresh++

				phi.pos = alloc.Pos()
				phi.setType(deref(alloc.Type()))
				phi.block = v
				if debugLifting {
					fmt.Fprintf(os.Stderr, "\tplace %s = %s at block %s\n", phi.Name(), phi, v)
				}
				newPhis[v] = append(newPhis[v], newPhi{phi, alloc})

				// A new φ is itself a definition, so v may in turn
				// contribute to the iterated dominance frontier.
				if work.add(v) {
					W.add(v)
				}
			}
		}
	}

	return true
}
|
||||
|
||||
// replaceAll replaces all intraprocedural uses of x with y,
|
||||
// updating x.Referrers and y.Referrers.
|
||||
// Precondition: x.Referrers() != nil, i.e. x must be local to some function.
|
||||
//
|
||||
func replaceAll(x, y Value) {
|
||||
var rands []*Value
|
||||
pxrefs := x.Referrers()
|
||||
pyrefs := y.Referrers()
|
||||
for _, instr := range *pxrefs {
|
||||
rands = instr.Operands(rands[:0]) // recycle storage
|
||||
for _, rand := range rands {
|
||||
if *rand != nil {
|
||||
if *rand == x {
|
||||
*rand = y
|
||||
}
|
||||
}
|
||||
}
|
||||
if pyrefs != nil {
|
||||
*pyrefs = append(*pyrefs, instr) // dups ok
|
||||
}
|
||||
}
|
||||
*pxrefs = nil // x is now unreferenced
|
||||
}
|
||||
|
||||
// renamed returns the value to which alloc is being renamed,
|
||||
// constructing it lazily if it's the implicit zero initialization.
|
||||
//
|
||||
func renamed(renaming []Value, alloc *Alloc) Value {
|
||||
v := renaming[alloc.index]
|
||||
if v == nil {
|
||||
v = zeroConst(deref(alloc.Type()))
|
||||
renaming[alloc.index] = v
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// rename implements the (Cytron et al) SSA renaming algorithm, a
// preorder traversal of the dominator tree replacing all loads of
// Alloc cells with the value stored to that cell by the dominating
// store instruction.  For lifting, we need only consider loads,
// stores and φ-nodes.
//
// renaming is a map from *Alloc (keyed by index number) to its
// dominating stored value; newPhis[x] is the set of new φ-nodes to be
// prepended to block x.  A nil entry in renaming means the cell still
// holds its implicit zero value (materialized lazily by renamed).
//
func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap) {
	// Each φ-node becomes the new name for its associated Alloc.
	for _, np := range newPhis[u] {
		phi := np.phi
		alloc := np.alloc
		renaming[alloc.index] = phi
	}

	// Rename loads and stores of allocs.
	// Deleted instructions are nilled out in place and counted in
	// u.gaps; the slice is not compacted here.
	for i, instr := range u.Instrs {
		switch instr := instr.(type) {
		case *Alloc:
			if instr.index >= 0 { // store of zero to Alloc cell
				// Replace dominated loads by the zero value.
				renaming[instr.index] = nil
				if debugLifting {
					fmt.Fprintf(os.Stderr, "\tkill alloc %s\n", instr)
				}
				// Delete the Alloc.
				u.Instrs[i] = nil
				u.gaps++
			}

		case *Store:
			if alloc, ok := instr.Addr.(*Alloc); ok && alloc.index >= 0 { // store to Alloc cell
				// Replace dominated loads by the stored value.
				renaming[alloc.index] = instr.Val
				if debugLifting {
					fmt.Fprintf(os.Stderr, "\tkill store %s; new value: %s\n",
						instr, instr.Val.Name())
				}
				// Remove the store from the referrer list of the stored value.
				if refs := instr.Val.Referrers(); refs != nil {
					*refs = removeInstr(*refs, instr)
				}
				// Delete the Store.
				u.Instrs[i] = nil
				u.gaps++
			}

		case *UnOp:
			if instr.Op == token.MUL {
				if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // load of Alloc cell
					newval := renamed(renaming, alloc)
					if debugLifting {
						fmt.Fprintf(os.Stderr, "\tupdate load %s = %s with %s\n",
							instr.Name(), instr, newval.Name())
					}
					// Replace all references to
					// the loaded value by the
					// dominating stored value.
					replaceAll(instr, newval)
					// Delete the Load.
					u.Instrs[i] = nil
					u.gaps++
				}
			}

		case *DebugRef:
			if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // ref of Alloc cell
				if instr.IsAddr {
					// Rewrite the address reference to refer to the
					// renamed value directly.
					instr.X = renamed(renaming, alloc)
					instr.IsAddr = false

					// Add DebugRef to instr.X's referrers.
					if refs := instr.X.Referrers(); refs != nil {
						*refs = append(*refs, instr)
					}
				} else {
					// A source expression denotes the address
					// of an Alloc that was optimized away.
					instr.X = nil

					// Delete the DebugRef.
					u.Instrs[i] = nil
					u.gaps++
				}
			}
		}
	}

	// For each φ-node in a CFG successor, rename the edge.
	for _, v := range u.Succs {
		phis := newPhis[v]
		if len(phis) == 0 {
			continue
		}
		// i is the index of edge u -> v among v's predecessors.
		i := v.predIndex(u)
		for _, np := range phis {
			phi := np.phi
			alloc := np.alloc
			newval := renamed(renaming, alloc)
			if debugLifting {
				fmt.Fprintf(os.Stderr, "\tsetphi %s edge %s -> %s (#%d) (alloc=%s) := %s\n",
					phi.Name(), u, v, i, alloc.Name(), newval.Name())
			}
			phi.Edges[i] = newval
			if prefs := newval.Referrers(); prefs != nil {
				*prefs = append(*prefs, phi)
			}
		}
	}

	// Continue depth-first recursion over domtree, pushing a
	// fresh copy of the renaming map for each subtree.
	for i, v := range u.dom.children {
		r := renaming
		if i < len(u.dom.children)-1 {
			// On all but the final iteration, we must make
			// a copy to avoid destructive update.
			r = make([]Value, len(renaming))
			copy(r, renaming)
		}
		rename(v, r, newPhis)
	}

}
|
||||
120
vendor/golang.org/x/tools/go/ssa/lvalue.go
generated
vendored
Normal file
120
vendor/golang.org/x/tools/go/ssa/lvalue.go
generated
vendored
Normal file
@@ -0,0 +1,120 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// lvalues are the union of addressable expressions and map-index
|
||||
// expressions.
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// An lvalue represents an assignable location that may appear on the
// left-hand side of an assignment.  This is a generalization of a
// pointer to permit updates to elements of maps.
//
type lvalue interface {
	store(fn *Function, v Value) // stores v into the location
	load(fn *Function) Value     // loads the contents of the location
	address(fn *Function) Value  // address of the location
	typ() types.Type             // returns the type of the location
}

// An address is an lvalue represented by a true pointer.
type address struct {
	addr Value
	pos  token.Pos // source position
	expr ast.Expr  // source syntax of the value (not address) [debug mode]
}

// load emits a load of the pointed-to location, tagged with the
// address's source position.
func (a *address) load(fn *Function) Value {
	load := emitLoad(fn, a.addr)
	load.pos = a.pos
	return load
}

// store emits a store of v to the pointed-to location, plus a
// DebugRef for the stored value when source syntax is available.
func (a *address) store(fn *Function, v Value) {
	store := emitStore(fn, a.addr, v, a.pos)
	if a.expr != nil {
		// store.Val is v, converted for assignability.
		emitDebugRef(fn, a.expr, store.Val, false)
	}
}

// address returns the underlying pointer, emitting a DebugRef for it
// when source syntax is available.
func (a *address) address(fn *Function) Value {
	if a.expr != nil {
		emitDebugRef(fn, a.expr, a.addr, true)
	}
	return a.addr
}

// typ returns the type of the pointed-to location (not the pointer).
func (a *address) typ() types.Type {
	return deref(a.addr.Type())
}
|
||||
|
||||
// An element is an lvalue represented by m[k], the location of an
// element of a map or string.  These locations are not addressable
// since pointers cannot be formed from them, but they do support
// load(), and in the case of maps, store().
//
type element struct {
	m, k Value      // map or string
	t    types.Type // map element type or string byte type
	pos  token.Pos  // source position of colon ({k:v}) or lbrack (m[k]=v)
}

// load emits a Lookup instruction m[k] yielding the element value.
func (e *element) load(fn *Function) Value {
	l := &Lookup{
		X:     e.m,
		Index: e.k,
	}
	l.setPos(e.pos)
	l.setType(e.t)
	return fn.emit(l)
}

// store emits a MapUpdate m[k] = v, converting v to the element type.
func (e *element) store(fn *Function, v Value) {
	up := &MapUpdate{
		Map:   e.m,
		Key:   e.k,
		Value: emitConv(fn, v, e.t),
	}
	up.pos = e.pos
	fn.emit(up)
}

// address panics: map/string element locations have no address.
func (e *element) address(fn *Function) Value {
	panic("map/string elements are not addressable")
}

// typ returns the element type of the location.
func (e *element) typ() types.Type {
	return e.t
}
|
||||
|
||||
// A blank is a dummy variable whose name is "_".
|
||||
// It is not reified: loads are illegal and stores are ignored.
|
||||
//
|
||||
type blank struct{}
|
||||
|
||||
func (bl blank) load(fn *Function) Value {
|
||||
panic("blank.load is illegal")
|
||||
}
|
||||
|
||||
func (bl blank) store(fn *Function, v Value) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
func (bl blank) address(fn *Function) Value {
|
||||
panic("blank var is not addressable")
|
||||
}
|
||||
|
||||
func (bl blank) typ() types.Type {
|
||||
// This should be the type of the blank Ident; the typechecker
|
||||
// doesn't provide this yet, but fortunately, we don't need it
|
||||
// yet either.
|
||||
panic("blank.typ is unimplemented")
|
||||
}
|
||||
239
vendor/golang.org/x/tools/go/ssa/methods.go
generated
vendored
Normal file
239
vendor/golang.org/x/tools/go/ssa/methods.go
generated
vendored
Normal file
@@ -0,0 +1,239 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines utilities for population of method sets.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// MethodValue returns the Function implementing method sel, building
// wrapper methods on demand.  It returns nil if sel denotes an
// abstract (interface) method.
//
// Precondition: sel.Kind() == MethodVal.
//
// Thread-safe.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
//
func (prog *Program) MethodValue(sel *types.Selection) *Function {
	if sel.Kind() != types.MethodVal {
		panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel))
	}
	T := sel.Recv()
	if isInterface(T) {
		return nil // abstract method
	}
	if prog.mode&LogSource != 0 {
		defer logStack("MethodValue %s %v", T, sel)()
	}

	// Lock is taken only after the interface fast path above.
	prog.methodsMu.Lock()
	defer prog.methodsMu.Unlock()

	return prog.addMethod(prog.createMethodSet(T), sel)
}
|
||||
|
||||
// LookupMethod returns the implementation of the method of type T
|
||||
// identified by (pkg, name). It returns nil if the method exists but
|
||||
// is abstract, and panics if T has no such method.
|
||||
//
|
||||
func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function {
|
||||
sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name)
|
||||
if sel == nil {
|
||||
panic(fmt.Sprintf("%s has no method %s", T, types.Id(pkg, name)))
|
||||
}
|
||||
return prog.MethodValue(sel)
|
||||
}
|
||||
|
||||
// methodSet contains the (concrete) methods of a non-interface type.
type methodSet struct {
	mapping  map[string]*Function // populated lazily
	complete bool                 // mapping contains all methods
}

// createMethodSet returns the methodSet for T, creating and
// registering an empty one on first request.
//
// Precondition: !isInterface(T).
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
func (prog *Program) createMethodSet(T types.Type) *methodSet {
	mset, ok := prog.methodSets.At(T).(*methodSet)
	if !ok {
		mset = &methodSet{mapping: make(map[string]*Function)}
		prog.methodSets.Set(T, mset)
	}
	return mset
}
|
||||
|
||||
// addMethod returns the Function implementing sel within mset,
// building and caching it on first request.
//
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function {
	if sel.Kind() == types.MethodExpr {
		panic(sel)
	}
	id := sel.Obj().Id()
	fn := mset.mapping[id]
	if fn == nil {
		obj := sel.Obj().(*types.Func)

		// A wrapper is required when the method is promoted from an
		// embedded field, or when the receiver must be dereferenced.
		needsPromotion := len(sel.Index()) > 1
		needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv())
		if needsPromotion || needsIndirection {
			fn = makeWrapper(prog, sel)
		} else {
			fn = prog.declaredFunc(obj)
		}
		if fn.Signature.Recv() == nil {
			panic(fn) // missing receiver
		}
		mset.mapping[id] = fn
	}
	return fn
}
|
||||
|
||||
// RuntimeTypes returns a new unordered slice containing all
|
||||
// concrete types in the program for which a complete (non-empty)
|
||||
// method set is required at run-time.
|
||||
//
|
||||
// Thread-safe.
|
||||
//
|
||||
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
|
||||
//
|
||||
func (prog *Program) RuntimeTypes() []types.Type {
|
||||
prog.methodsMu.Lock()
|
||||
defer prog.methodsMu.Unlock()
|
||||
|
||||
var res []types.Type
|
||||
prog.methodSets.Iterate(func(T types.Type, v interface{}) {
|
||||
if v.(*methodSet).complete {
|
||||
res = append(res, T)
|
||||
}
|
||||
})
|
||||
return res
|
||||
}
|
||||
|
||||
// declaredFunc returns the concrete function/method denoted by obj.
|
||||
// Panic ensues if there is none.
|
||||
//
|
||||
func (prog *Program) declaredFunc(obj *types.Func) *Function {
|
||||
if v := prog.packageLevelValue(obj); v != nil {
|
||||
return v.(*Function)
|
||||
}
|
||||
panic("no concrete method: " + obj.String())
|
||||
}
|
||||
|
||||
// needMethodsOf ensures that runtime type information (including the
// complete method set) is available for the specified type T and all
// its subcomponents.
//
// needMethodsOf must be called for at least every type that is an
// operand of some MakeInterface instruction, and for the type of
// every exported package member.
//
// Precondition: T is not a method signature (*Signature with Recv()!=nil).
//
// Thread-safe.  (Called via emitConv from multiple builder goroutines.)
//
// TODO(adonovan): make this faster.  It accounts for 20% of SSA build time.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
//
func (prog *Program) needMethodsOf(T types.Type) {
	// Locked wrapper around the recursive needMethods.
	prog.methodsMu.Lock()
	prog.needMethods(T, false)
	prog.methodsMu.Unlock()
}
|
||||
|
||||
// needMethods recursively ensures runtime type information for T and
// its subcomponents, creating method sets on the way.
//
// Precondition: T is not a method signature (*Signature with Recv()!=nil).
// Recursive case: skip => don't create methods for T.
//
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
//
func (prog *Program) needMethods(T types.Type, skip bool) {
	// Each package maintains its own set of types it has visited.
	if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok {
		// needMethods(T) was previously called
		if !prevSkip || skip {
			return // already seen, with same or false 'skip' value
		}
	}
	prog.runtimeTypes.Set(T, skip)

	tmset := prog.MethodSets.MethodSet(T)

	if !skip && !isInterface(T) && tmset.Len() > 0 {
		// Create methods of T.
		mset := prog.createMethodSet(T)
		if !mset.complete {
			mset.complete = true
			n := tmset.Len()
			for i := 0; i < n; i++ {
				prog.addMethod(mset, tmset.At(i))
			}
		}
	}

	// Recursion over signatures of each method.
	for i := 0; i < tmset.Len(); i++ {
		sig := tmset.At(i).Type().(*types.Signature)
		prog.needMethods(sig.Params(), false)
		prog.needMethods(sig.Results(), false)
	}

	// Recursion over the structural subcomponents of T.
	switch t := T.(type) {
	case *types.Basic:
		// nop

	case *types.Interface:
		// nop---handled by recursion over method set.

	case *types.Pointer:
		prog.needMethods(t.Elem(), false)

	case *types.Slice:
		prog.needMethods(t.Elem(), false)

	case *types.Chan:
		prog.needMethods(t.Elem(), false)

	case *types.Map:
		prog.needMethods(t.Key(), false)
		prog.needMethods(t.Elem(), false)

	case *types.Signature:
		if t.Recv() != nil {
			panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv()))
		}
		prog.needMethods(t.Params(), false)
		prog.needMethods(t.Results(), false)

	case *types.Named:
		// A pointer-to-named type can be derived from a named
		// type via reflection.  It may have methods too.
		prog.needMethods(types.NewPointer(T), false)

		// Consider 'type T struct{S}' where S has methods.
		// Reflection provides no way to get from T to struct{S},
		// only to S, so the method set of struct{S} is unwanted,
		// so set 'skip' flag during recursion.
		prog.needMethods(t.Underlying(), true)

	case *types.Array:
		prog.needMethods(t.Elem(), false)

	case *types.Struct:
		for i, n := 0, t.NumFields(); i < n; i++ {
			prog.needMethods(t.Field(i).Type(), false)
		}

	case *types.Tuple:
		for i, n := 0, t.Len(); i < n; i++ {
			prog.needMethods(t.At(i).Type(), false)
		}

	default:
		panic(T)
	}
}
|
||||
100
vendor/golang.org/x/tools/go/ssa/mode.go
generated
vendored
Normal file
100
vendor/golang.org/x/tools/go/ssa/mode.go
generated
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines the BuilderMode type and its command-line flag.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// BuilderMode is a bitmask of options for diagnostics and checking.
//
// *BuilderMode satisfies the flag.Value interface.  Example:
//
// 	var mode = ssa.BuilderMode(0)
// 	func init() { flag.Var(&mode, "build", ssa.BuilderModeDoc) }
//
type BuilderMode uint

const (
	PrintPackages        BuilderMode = 1 << iota // Print package inventory to stdout
	PrintFunctions                               // Print function SSA code to stdout
	LogSource                                    // Log source locations as SSA builder progresses
	SanityCheckFunctions                         // Perform sanity checking of function bodies
	NaiveForm                                    // Build naïve SSA form: don't replace local loads/stores with registers
	BuildSerially                                // Build packages serially, not in parallel.
	GlobalDebug                                  // Enable debug info for all packages
	BareInits                                    // Build init functions without guards or calls to dependent inits
)

const BuilderModeDoc = `Options controlling the SSA builder.
The value is a sequence of zero or more of these letters:
C	perform sanity [C]hecking of the SSA form.
D	include [D]ebug info for every function.
P	print [P]ackage inventory.
F	print [F]unction SSA code.
S	log [S]ource locations as SSA builder progresses.
L	build distinct packages seria[L]ly instead of in parallel.
N	build [N]aive SSA form: don't replace local loads/stores with registers.
I	build bare [I]nit functions: no init guards or calls to dependent inits.
`

// String returns the letter encoding of m, the inverse of Set.
func (m BuilderMode) String() string {
	var buf bytes.Buffer
	if m&GlobalDebug != 0 {
		buf.WriteByte('D')
	}
	if m&PrintPackages != 0 {
		buf.WriteByte('P')
	}
	if m&PrintFunctions != 0 {
		buf.WriteByte('F')
	}
	if m&LogSource != 0 {
		buf.WriteByte('S')
	}
	if m&SanityCheckFunctions != 0 {
		buf.WriteByte('C')
	}
	if m&NaiveForm != 0 {
		buf.WriteByte('N')
	}
	if m&BuildSerially != 0 {
		buf.WriteByte('L')
	}
	// BareInits was documented in BuilderModeDoc but previously
	// omitted here and in Set.
	if m&BareInits != 0 {
		buf.WriteByte('I')
	}
	return buf.String()
}

// Set parses the flag characters in s and updates *m.
// On success, *m is replaced wholesale; on error it is left unchanged.
func (m *BuilderMode) Set(s string) error {
	var mode BuilderMode
	for _, c := range s {
		switch c {
		case 'D':
			mode |= GlobalDebug
		case 'P':
			mode |= PrintPackages
		case 'F':
			mode |= PrintFunctions
		case 'S':
			mode |= LogSource | BuildSerially
		case 'C':
			mode |= SanityCheckFunctions
		case 'N':
			mode |= NaiveForm
		case 'L':
			mode |= BuildSerially
		case 'I':
			// Accept the 'I' option promised by BuilderModeDoc.
			mode |= BareInits
		default:
			return fmt.Errorf("unknown BuilderMode option: %q", c)
		}
	}
	*m = mode
	return nil
}

// Get returns m.
func (m BuilderMode) Get() interface{} { return m }
|
||||
431
vendor/golang.org/x/tools/go/ssa/print.go
generated
vendored
Normal file
431
vendor/golang.org/x/tools/go/ssa/print.go
generated
vendored
Normal file
@@ -0,0 +1,431 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file implements the String() methods for all Value and
|
||||
// Instruction types.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"golang.org/x/tools/go/types/typeutil"
|
||||
)
|
||||
|
||||
// relName returns the name of v relative to i.
// In most cases, this is identical to v.Name(), but references to
// Functions (including methods) and Globals use RelString and
// all types are displayed with relType, so that only cross-package
// references are package-qualified.
//
func relName(v Value, i Instruction) string {
	var from *types.Package
	if i != nil {
		from = i.Parent().pkg()
	}
	switch v := v.(type) {
	case Member: // *Function or *Global
		return v.RelString(from)
	case *Const:
		return v.RelString(from)
	}
	return v.Name()
}

// relType renders t, qualifying only names from packages other than from.
func relType(t types.Type, from *types.Package) string {
	return types.TypeString(t, types.RelativeTo(from))
}

// relString renders member m, prefixing its package path unless m
// belongs to from.
func relString(m Member, from *types.Package) string {
	// NB: not all globals have an Object (e.g. init$guard),
	// so use Package().Object not Object.Package().
	if pkg := m.Package().Pkg; pkg != nil && pkg != from {
		return fmt.Sprintf("%s.%s", pkg.Path(), m.Name())
	}
	return m.Name()
}
|
||||
|
||||
// Value.String()
//
// This method is provided only for debugging.
// It never appears in disassembly, which uses Value.Name().

// String renders a parameter as "parameter name : type".
func (v *Parameter) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("parameter %s : %s", v.Name(), relType(v.Type(), from))
}

// String renders a free variable as "freevar name : type".
func (v *FreeVar) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("freevar %s : %s", v.Name(), relType(v.Type(), from))
}

// String renders a built-in as "builtin name".
func (v *Builtin) String() string {
	return fmt.Sprintf("builtin %s", v.Name())
}

// Instruction.String()

// String renders an Alloc as "local T (comment)" or, for heap
// allocations, "new T (comment)".
func (v *Alloc) String() string {
	op := "local"
	if v.Heap {
		op = "new"
	}
	from := v.Parent().pkg()
	return fmt.Sprintf("%s %s (%s)", op, relType(deref(v.Type()), from), v.Comment)
}
|
||||
|
||||
// String renders a φ-node as "phi [pred: edge, ...] #comment",
// tolerating a malformed CFG (missing block or nil edges).
func (v *Phi) String() string {
	var b bytes.Buffer
	b.WriteString("phi [")
	for i, edge := range v.Edges {
		if i > 0 {
			b.WriteString(", ")
		}
		// Be robust against malformed CFG.
		if v.block == nil {
			b.WriteString("??")
			continue
		}
		block := -1
		if i < len(v.block.Preds) {
			block = v.block.Preds[i].Index
		}
		fmt.Fprintf(&b, "%d: ", block)
		edgeVal := "<nil>" // be robust
		if edge != nil {
			edgeVal = relName(edge, v)
		}
		b.WriteString(edgeVal)
	}
	b.WriteString("]")
	if v.Comment != "" {
		b.WriteString(" #")
		b.WriteString(v.Comment)
	}
	return b.String()
}
|
||||
|
||||
// printCall renders a call instruction as "prefix callee(args...)",
// using "invoke recv.Method" form for interface method calls and
// appending "..." for variadic signatures.
func printCall(v *CallCommon, prefix string, instr Instruction) string {
	var b bytes.Buffer
	b.WriteString(prefix)
	if !v.IsInvoke() {
		b.WriteString(relName(v.Value, instr))
	} else {
		fmt.Fprintf(&b, "invoke %s.%s", relName(v.Value, instr), v.Method.Name())
	}
	b.WriteString("(")
	for i, arg := range v.Args {
		if i > 0 {
			b.WriteString(", ")
		}
		b.WriteString(relName(arg, instr))
	}
	if v.Signature().Variadic() {
		b.WriteString("...")
	}
	b.WriteString(")")
	return b.String()
}

// String renders the call without a prefix or referencing instruction.
func (c *CallCommon) String() string {
	return printCall(c, "", nil)
}
|
||||
|
||||
// String renders a Call via printCall.
func (v *Call) String() string {
	return printCall(&v.Call, "", v)
}

// String renders a binary operation as "x op y".
func (v *BinOp) String() string {
	return fmt.Sprintf("%s %s %s", relName(v.X, v), v.Op.String(), relName(v.Y, v))
}

// String renders a unary operation as "opx", with ",ok" for
// comma-ok forms.
func (v *UnOp) String() string {
	return fmt.Sprintf("%s%s%s", v.Op, relName(v.X, v), commaOk(v.CommaOk))
}

// printConv renders a conversion-like instruction as
// "prefix dstType <- srcType (operand)".
func printConv(prefix string, v, x Value) string {
	from := v.Parent().pkg()
	return fmt.Sprintf("%s %s <- %s (%s)",
		prefix,
		relType(v.Type(), from),
		relType(x.Type(), from),
		relName(x, v.(Instruction)))
}

// Conversion instructions all share the printConv format.
func (v *ChangeType) String() string      { return printConv("changetype", v, v.X) }
func (v *Convert) String() string         { return printConv("convert", v, v.X) }
func (v *ChangeInterface) String() string { return printConv("change interface", v, v.X) }
func (v *MakeInterface) String() string   { return printConv("make", v, v.X) }
|
||||
|
||||
// String renders a closure as "make closure fn [bindings...]".
func (v *MakeClosure) String() string {
	var b bytes.Buffer
	fmt.Fprintf(&b, "make closure %s", relName(v.Fn, v))
	if v.Bindings != nil {
		b.WriteString(" [")
		for i, c := range v.Bindings {
			if i > 0 {
				b.WriteString(", ")
			}
			b.WriteString(relName(c, v))
		}
		b.WriteString("]")
	}
	return b.String()
}

// String renders a slice allocation as "make T len cap".
func (v *MakeSlice) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("make %s %s %s",
		relType(v.Type(), from),
		relName(v.Len, v),
		relName(v.Cap, v))
}

// String renders a slice expression as "slice x[low:high]" or
// "slice x[low:high:max]", omitting absent bounds.
func (v *Slice) String() string {
	var b bytes.Buffer
	b.WriteString("slice ")
	b.WriteString(relName(v.X, v))
	b.WriteString("[")
	if v.Low != nil {
		b.WriteString(relName(v.Low, v))
	}
	b.WriteString(":")
	if v.High != nil {
		b.WriteString(relName(v.High, v))
	}
	if v.Max != nil {
		b.WriteString(":")
		b.WriteString(relName(v.Max, v))
	}
	b.WriteString("]")
	return b.String()
}
|
||||
|
||||
// String renders a map allocation as "make T reserve" (reserve may
// be empty).
func (v *MakeMap) String() string {
	res := ""
	if v.Reserve != nil {
		res = relName(v.Reserve, v)
	}
	from := v.Parent().pkg()
	return fmt.Sprintf("make %s %s", relType(v.Type(), from), res)
}

// String renders a channel allocation as "make T size".
func (v *MakeChan) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("make %s %s", relType(v.Type(), from), relName(v.Size, v))
}

// String renders a field address as "&x.name [#index]".
func (v *FieldAddr) String() string {
	st := deref(v.X.Type()).Underlying().(*types.Struct)
	// Be robust against a bad index.
	name := "?"
	if 0 <= v.Field && v.Field < st.NumFields() {
		name = st.Field(v.Field).Name()
	}
	return fmt.Sprintf("&%s.%s [#%d]", relName(v.X, v), name, v.Field)
}

// String renders a field selection as "x.name [#index]".
func (v *Field) String() string {
	st := v.X.Type().Underlying().(*types.Struct)
	// Be robust against a bad index.
	name := "?"
	if 0 <= v.Field && v.Field < st.NumFields() {
		name = st.Field(v.Field).Name()
	}
	return fmt.Sprintf("%s.%s [#%d]", relName(v.X, v), name, v.Field)
}
|
||||
|
||||
func (v *IndexAddr) String() string {
|
||||
return fmt.Sprintf("&%s[%s]", relName(v.X, v), relName(v.Index, v))
|
||||
}
|
||||
|
||||
func (v *Index) String() string {
|
||||
return fmt.Sprintf("%s[%s]", relName(v.X, v), relName(v.Index, v))
|
||||
}
|
||||
|
||||
func (v *Lookup) String() string {
|
||||
return fmt.Sprintf("%s[%s]%s", relName(v.X, v), relName(v.Index, v), commaOk(v.CommaOk))
|
||||
}
|
||||
|
||||
func (v *Range) String() string {
|
||||
return "range " + relName(v.X, v)
|
||||
}
|
||||
|
||||
func (v *Next) String() string {
|
||||
return "next " + relName(v.Iter, v)
|
||||
}
|
||||
|
||||
func (v *TypeAssert) String() string {
|
||||
from := v.Parent().pkg()
|
||||
return fmt.Sprintf("typeassert%s %s.(%s)", commaOk(v.CommaOk), relName(v.X, v), relType(v.AssertedType, from))
|
||||
}
|
||||
|
||||
func (v *Extract) String() string {
|
||||
return fmt.Sprintf("extract %s #%d", relName(v.Tuple, v), v.Index)
|
||||
}
|
||||
|
||||
func (s *Jump) String() string {
|
||||
// Be robust against malformed CFG.
|
||||
block := -1
|
||||
if s.block != nil && len(s.block.Succs) == 1 {
|
||||
block = s.block.Succs[0].Index
|
||||
}
|
||||
return fmt.Sprintf("jump %d", block)
|
||||
}
|
||||
|
||||
// String renders the instruction as "if cond goto T else F", using
// the indices of the true/false successor blocks, or -1 for each if
// the CFG is malformed.
func (s *If) String() string {
	// Be robust against malformed CFG.
	tblock, fblock := -1, -1
	if s.block != nil && len(s.block.Succs) == 2 {
		tblock = s.block.Succs[0].Index
		fblock = s.block.Succs[1].Index
	}
	return fmt.Sprintf("if %s goto %d else %d", relName(s.Cond, s), tblock, fblock)
}

// String renders the instruction as "go f(args)".
func (s *Go) String() string {
	return printCall(&s.Call, "go ", s)
}

// String renders the instruction as "panic x".
func (s *Panic) String() string {
	return "panic " + relName(s.X, s)
}
|
||||
|
||||
func (s *Return) String() string {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("return")
|
||||
for i, r := range s.Results {
|
||||
if i == 0 {
|
||||
b.WriteString(" ")
|
||||
} else {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
b.WriteString(relName(r, s))
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// String renders the instruction as "rundefers".
func (*RunDefers) String() string {
	return "rundefers"
}

// String renders the instruction as "send ch <- x".
func (s *Send) String() string {
	return fmt.Sprintf("send %s <- %s", relName(s.Chan, s), relName(s.X, s))
}

// String renders the instruction as "defer f(args)".
func (s *Defer) String() string {
	return printCall(&s.Call, "defer ", s)
}

// String renders the instruction as "select blocking [states]" or
// "select nonblocking [states]"; each state is a receive "<-ch" or a
// send "ch<-x".
func (s *Select) String() string {
	var b bytes.Buffer
	for i, st := range s.States {
		if i > 0 {
			b.WriteString(", ")
		}
		if st.Dir == types.RecvOnly {
			b.WriteString("<-")
			b.WriteString(relName(st.Chan, s))
		} else {
			// Send state (types.SendOnly): "ch<-x".
			b.WriteString(relName(st.Chan, s))
			b.WriteString("<-")
			b.WriteString(relName(st.Send, s))
		}
	}
	non := ""
	if !s.Blocking {
		non = "non"
	}
	return fmt.Sprintf("select %sblocking [%s]", non, b.String())
}

// String renders the instruction as "*addr = val".
func (s *Store) String() string {
	return fmt.Sprintf("*%s = %s", relName(s.Addr, s), relName(s.Val, s))
}

// String renders the instruction as "m[k] = v".
func (s *MapUpdate) String() string {
	return fmt.Sprintf("%s[%s] = %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s))
}
|
||||
|
||||
// String renders the instruction as a comment of the form
// "; [address of ]descr @ line:col is name", where descr is either
// the source-level object or the dynamic type of the expression.
func (s *DebugRef) String() string {
	p := s.Parent().Prog.Fset.Position(s.Pos())
	var descr interface{}
	if s.object != nil {
		descr = s.object // e.g. "var x int"
	} else {
		descr = reflect.TypeOf(s.Expr) // e.g. "*ast.CallExpr"
	}
	var addr string
	if s.IsAddr {
		addr = "address of "
	}
	return fmt.Sprintf("; %s%s @ %d:%d is %s", addr, descr, p.Line, p.Column, s.X.Name())
}
|
||||
|
||||
func (p *Package) String() string {
|
||||
return "package " + p.Pkg.Path()
|
||||
}
|
||||
|
||||
var _ io.WriterTo = (*Package)(nil) // *Package implements io.WriterTo

// WriteTo writes to w a human-readable summary of p, and returns the
// number of bytes written together with any write error.
func (p *Package) WriteTo(w io.Writer) (int64, error) {
	// Render into a buffer first so a single Write is issued to w.
	var buf bytes.Buffer
	WritePackage(&buf, p)
	n, err := w.Write(buf.Bytes())
	return int64(n), err
}
|
||||
|
||||
// WritePackage writes to buf a human-readable summary of p: one line
// per package member (const/func/type/var), sorted by name, with the
// name column padded to the longest member name; a type's intuitive
// method set is listed beneath it.
func WritePackage(buf *bytes.Buffer, p *Package) {
	fmt.Fprintf(buf, "%s:\n", p)

	// First pass: collect names and compute the padding width.
	var names []string
	maxname := 0
	for name := range p.Members {
		if l := len(name); l > maxname {
			maxname = l
		}
		names = append(names, name)
	}

	from := p.Pkg
	sort.Strings(names) // deterministic output despite map iteration order
	for _, name := range names {
		switch mem := p.Members[name].(type) {
		case *NamedConst:
			fmt.Fprintf(buf, " const %-*s %s = %s\n",
				maxname, name, mem.Name(), mem.Value.RelString(from))

		case *Function:
			fmt.Fprintf(buf, " func %-*s %s\n",
				maxname, name, relType(mem.Type(), from))

		case *Type:
			fmt.Fprintf(buf, " type %-*s %s\n",
				maxname, name, relType(mem.Type().Underlying(), from))
			for _, meth := range typeutil.IntuitiveMethodSet(mem.Type(), &p.Prog.MethodSets) {
				fmt.Fprintf(buf, " %s\n", types.SelectionString(meth, types.RelativeTo(from)))
			}

		case *Global:
			// A Global's type is a pointer to the variable's type.
			fmt.Fprintf(buf, " var %-*s %s\n",
				maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from))
		}
	}

	fmt.Fprintf(buf, "\n")
}
|
||||
|
||||
// commaOk returns the ",ok" suffix used when rendering the comma-ok
// form of an instruction, or the empty string if x is false.
func commaOk(x bool) string {
	if !x {
		return ""
	}
	return ",ok"
}
|
||||
532
vendor/golang.org/x/tools/go/ssa/sanity.go
generated
vendored
Normal file
532
vendor/golang.org/x/tools/go/ssa/sanity.go
generated
vendored
Normal file
@@ -0,0 +1,532 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// An optional pass for sanity-checking invariants of the SSA representation.
|
||||
// Currently it checks CFG invariants but little at the instruction level.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/types"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// sanity holds the state of a single sanity-checking pass over one
// function.
type sanity struct {
	reporter io.Writer                // destination for diagnostics (never nil during a check)
	fn       *Function                // function currently under scrutiny
	block    *BasicBlock              // block currently under scrutiny, if any
	instrs   map[Instruction]struct{} // set of all instructions of fn; the valid referrers
	insane   bool                     // set if any error (not mere warning) was reported
}
|
||||
|
||||
// sanityCheck performs integrity checking of the SSA representation
// of the function fn and returns true if it was valid. Diagnostics
// are written to reporter if non-nil, os.Stderr otherwise. Some
// diagnostics are only warnings and do not imply a negative result.
//
// Sanity-checking is intended to facilitate the debugging of code
// transformation passes.
//
func sanityCheck(fn *Function, reporter io.Writer) bool {
	if reporter == nil {
		reporter = os.Stderr
	}
	// A fresh checker per call; checkFunction reports via reporter
	// and accumulates the verdict in its insane flag.
	return (&sanity{reporter: reporter}).checkFunction(fn)
}
|
||||
|
||||
// mustSanityCheck is like sanityCheck but panics instead of returning
// a negative result.
//
func mustSanityCheck(fn *Function, reporter io.Writer) {
	if !sanityCheck(fn, reporter) {
		// Dump the offending function before panicking to aid debugging.
		fn.WriteTo(os.Stderr)
		panic("SanityCheck failed")
	}
}
|
||||
|
||||
// diagnostic writes a formatted message to s.reporter, prefixed by
// prefix (e.g. "Error", "Warning") and the current function/block
// context, and terminated by a newline.
func (s *sanity) diagnostic(prefix, format string, args ...interface{}) {
	fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn)
	if s.block != nil {
		fmt.Fprintf(s.reporter, ", block %s", s.block)
	}
	io.WriteString(s.reporter, ": ")
	fmt.Fprintf(s.reporter, format, args...)
	io.WriteString(s.reporter, "\n")
}

// errorf reports an integrity violation and marks the function as
// insane, causing the overall check to fail.
func (s *sanity) errorf(format string, args ...interface{}) {
	s.insane = true
	s.diagnostic("Error", format, args...)
}

// warnf reports a non-fatal diagnostic; it does not affect the
// outcome of the check.
func (s *sanity) warnf(format string, args ...interface{}) {
	s.diagnostic("Warning", format, args...)
}
|
||||
|
||||
// findDuplicate returns an arbitrary basic block that appeared more
|
||||
// than once in blocks, or nil if all were unique.
|
||||
func findDuplicate(blocks []*BasicBlock) *BasicBlock {
|
||||
if len(blocks) < 2 {
|
||||
return nil
|
||||
}
|
||||
if blocks[0] == blocks[1] {
|
||||
return blocks[0]
|
||||
}
|
||||
// Slow path:
|
||||
m := make(map[*BasicBlock]bool)
|
||||
for _, b := range blocks {
|
||||
if m[b] {
|
||||
return b
|
||||
}
|
||||
m[b] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkInstr checks the sanity of a non-terminal instruction at index
// idx of the current block: Phi placement and edge/predecessor
// agreement, local Alloc registration, Convert operand kinds,
// MakeClosure binding counts, plus generic checks applied to any
// call instruction and any value-defining instruction (valid type,
// valid referrer list).
func (s *sanity) checkInstr(idx int, instr Instruction) {
	switch instr := instr.(type) {
	case *If, *Jump, *Return, *Panic:
		s.errorf("control flow instruction not at end of block")
	case *Phi:
		if idx == 0 {
			// It suffices to apply this check to just the first phi node.
			if dup := findDuplicate(s.block.Preds); dup != nil {
				s.errorf("phi node in block with duplicate predecessor %s", dup)
			}
		} else {
			// Phis must form a contiguous prefix of the block.
			prev := s.block.Instrs[idx-1]
			if _, ok := prev.(*Phi); !ok {
				s.errorf("Phi instruction follows a non-Phi: %T", prev)
			}
		}
		if ne, np := len(instr.Edges), len(s.block.Preds); ne != np {
			s.errorf("phi node has %d edges but %d predecessors", ne, np)

		} else {
			for i, e := range instr.Edges {
				if e == nil {
					s.errorf("phi node '%s' has no value for edge #%d from %s", instr.Comment, i, s.block.Preds[i])
				}
			}
		}

	case *Alloc:
		if !instr.Heap {
			// Stack allocs must be registered in Function.Locals.
			found := false
			for _, l := range s.fn.Locals {
				if l == instr {
					found = true
					break
				}
			}
			if !found {
				s.errorf("local alloc %s = %s does not appear in Function.Locals", instr.Name(), instr)
			}
		}

	case *BinOp:
	case *Call:
	case *ChangeInterface:
	case *ChangeType:
	case *Convert:
		// A conversion is only valid if at least one side is basic.
		if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok {
			if _, ok := instr.Type().Underlying().(*types.Basic); !ok {
				s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type())
			}
		}

	case *Defer:
	case *Extract:
	case *Field:
	case *FieldAddr:
	case *Go:
	case *Index:
	case *IndexAddr:
	case *Lookup:
	case *MakeChan:
	case *MakeClosure:
		numFree := len(instr.Fn.(*Function).FreeVars)
		numBind := len(instr.Bindings)
		if numFree != numBind {
			s.errorf("MakeClosure has %d Bindings for function %s with %d free vars",
				numBind, instr.Fn, numFree)

		}
		if recv := instr.Type().(*types.Signature).Recv(); recv != nil {
			s.errorf("MakeClosure's type includes receiver %s", recv.Type())
		}

	case *MakeInterface:
	case *MakeMap:
	case *MakeSlice:
	case *MapUpdate:
	case *Next:
	case *Range:
	case *RunDefers:
	case *Select:
	case *Send:
	case *Slice:
	case *Store:
	case *TypeAssert:
	case *UnOp:
	case *DebugRef:
		// TODO(adonovan): implement checks.
	default:
		panic(fmt.Sprintf("Unknown instruction type: %T", instr))
	}

	if call, ok := instr.(CallInstruction); ok {
		if call.Common().Signature() == nil {
			s.errorf("nil signature: %s", call)
		}
	}

	// Check that value-defining instructions have valid types
	// and a valid referrer list.
	if v, ok := instr.(Value); ok {
		t := v.Type()
		if t == nil {
			s.errorf("no type: %s = %s", v.Name(), v)
		} else if t == tRangeIter {
			// not a proper type; ignore.
		} else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 {
			s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t)
		}
		s.checkReferrerList(v)
	}

	// Untyped constants are legal as instruction Operands(),
	// for example:
	//   _ = "foo"[0]
	// or:
	//   if wordsize==64 {...}

	// All other non-Instruction Values can be found via their
	// enclosing Function or Package.
}
|
||||
|
||||
// checkFinalInstr checks that instr, the final instruction of the
// current block, is a control-flow instruction whose kind agrees with
// the block's successor count, and (for Return) that the arity of the
// result list matches the enclosing function's signature.
func (s *sanity) checkFinalInstr(instr Instruction) {
	switch instr := instr.(type) {
	case *If:
		if nsuccs := len(s.block.Succs); nsuccs != 2 {
			s.errorf("If-terminated block has %d successors; expected 2", nsuccs)
			return
		}
		if s.block.Succs[0] == s.block.Succs[1] {
			s.errorf("If-instruction has same True, False target blocks: %s", s.block.Succs[0])
			return
		}

	case *Jump:
		if nsuccs := len(s.block.Succs); nsuccs != 1 {
			s.errorf("Jump-terminated block has %d successors; expected 1", nsuccs)
			return
		}

	case *Return:
		if nsuccs := len(s.block.Succs); nsuccs != 0 {
			s.errorf("Return-terminated block has %d successors; expected none", nsuccs)
			return
		}
		if na, nf := len(instr.Results), s.fn.Signature.Results().Len(); nf != na {
			s.errorf("%d-ary return in %d-ary function", na, nf)
		}

	case *Panic:
		if nsuccs := len(s.block.Succs); nsuccs != 0 {
			s.errorf("Panic-terminated block has %d successors; expected none", nsuccs)
			return
		}

	default:
		s.errorf("non-control flow instruction at end of block")
	}
}
|
||||
|
||||
// checkBlock checks the sanity of block b, the index'th block of the
// current function: its Index/parent fields, reachability, the
// duality of Pred/Succ edges, and the sanity of every instruction,
// including operand/referrer cross-links.
func (s *sanity) checkBlock(b *BasicBlock, index int) {
	s.block = b

	if b.Index != index {
		s.errorf("block has incorrect Index %d", b.Index)
	}
	if b.parent != s.fn {
		s.errorf("block has incorrect parent %s", b.parent)
	}

	// Check all blocks are reachable.
	// (The entry block is always implicitly reachable,
	// as is the Recover block, if any.)
	if (index > 0 && b != b.parent.Recover) && len(b.Preds) == 0 {
		s.warnf("unreachable block")
		if b.Instrs == nil {
			// Since this block is about to be pruned,
			// tolerating transient problems in it
			// simplifies other optimizations.
			return
		}
	}

	// Check predecessor and successor relations are dual,
	// and that all blocks in CFG belong to same function.
	for _, a := range b.Preds {
		found := false
		for _, bb := range a.Succs {
			if bb == b {
				found = true
				break
			}
		}
		if !found {
			s.errorf("expected successor edge in predecessor %s; found only: %s", a, a.Succs)
		}
		if a.parent != s.fn {
			s.errorf("predecessor %s belongs to different function %s", a, a.parent)
		}
	}
	for _, c := range b.Succs {
		found := false
		for _, bb := range c.Preds {
			if bb == b {
				found = true
				break
			}
		}
		if !found {
			s.errorf("expected predecessor edge in successor %s; found only: %s", c, c.Preds)
		}
		if c.parent != s.fn {
			s.errorf("successor %s belongs to different function %s", c, c.parent)
		}
	}

	// Check each instruction is sane.
	n := len(b.Instrs)
	if n == 0 {
		s.errorf("basic block contains no instructions")
	}
	var rands [10]*Value // reuse storage
	for j, instr := range b.Instrs {
		if instr == nil {
			s.errorf("nil instruction at index %d", j)
			continue
		}
		if b2 := instr.Block(); b2 == nil {
			s.errorf("nil Block() for instruction at index %d", j)
			continue
		} else if b2 != b {
			s.errorf("wrong Block() (%s) for instruction at index %d ", b2, j)
			continue
		}
		if j < n-1 {
			s.checkInstr(j, instr)
		} else {
			// Last instruction must be a terminator.
			s.checkFinalInstr(instr)
		}

		// Check Instruction.Operands.
	operands:
		for i, op := range instr.Operands(rands[:0]) {
			if op == nil {
				s.errorf("nil operand pointer %d of %s", i, instr)
				continue
			}
			val := *op
			if val == nil {
				continue // a nil operand is ok
			}

			// Check that "untyped" types only appear on constant operands.
			if _, ok := (*op).(*Const); !ok {
				if basic, ok := (*op).Type().(*types.Basic); ok {
					if basic.Info()&types.IsUntyped != 0 {
						s.errorf("operand #%d of %s is untyped: %s", i, instr, basic)
					}
				}
			}

			// Check that Operands that are also Instructions belong to same function.
			// TODO(adonovan): also check their block dominates block b.
			if val, ok := val.(Instruction); ok {
				if val.Block() == nil {
					s.errorf("operand %d of %s is an instruction (%s) that belongs to no block", i, instr, val)
				} else if val.Parent() != s.fn {
					s.errorf("operand %d of %s is an instruction (%s) from function %s", i, instr, val, val.Parent())
				}
			}

			// Check that each function-local operand of
			// instr refers back to instr.  (NB: quadratic)
			switch val := val.(type) {
			case *Const, *Global, *Builtin:
				continue // not local
			case *Function:
				if val.parent == nil {
					continue // only anon functions are local
				}
			}

			// TODO(adonovan): check val.Parent() != nil <=> val.Referrers() is defined.

			if refs := val.Referrers(); refs != nil {
				for _, ref := range *refs {
					if ref == instr {
						continue operands
					}
				}
				s.errorf("operand %d of %s (%s) does not refer to us", i, instr, val)
			} else {
				s.errorf("operand %d of %s (%s) has no referrers", i, instr, val)
			}
		}
	}
}
|
||||
|
||||
// checkReferrerList checks that v's referrer list is present and that
// every referrer is an instruction belonging to the function under
// scrutiny (i.e. a member of s.instrs).
func (s *sanity) checkReferrerList(v Value) {
	refs := v.Referrers()
	if refs == nil {
		s.errorf("%s has missing referrer list", v.Name())
		return
	}
	for i, ref := range *refs {
		if _, ok := s.instrs[ref]; !ok {
			s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref)
		}
	}
}
|
||||
|
||||
// checkFunction checks all invariants of fn — package linkage,
// synthetic/syntax consistency, locals, params, free variables,
// blocks, the Recover block and anonymous functions — and returns
// true if fn is sane (no errors were reported).
func (s *sanity) checkFunction(fn *Function) bool {
	// TODO(adonovan): check Function invariants:
	// - check params match signature
	// - check transient fields are nil
	// - warn if any fn.Locals do not appear among block instructions.
	s.fn = fn
	if fn.Prog == nil {
		s.errorf("nil Prog")
	}

	_ = fn.String()            // must not crash
	_ = fn.RelString(fn.pkg()) // must not crash

	// All functions have a package, except delegates (which are
	// shared across packages, or duplicated as weak symbols in a
	// separate-compilation model), and error.Error.
	if fn.Pkg == nil {
		if strings.HasPrefix(fn.Synthetic, "wrapper ") ||
			strings.HasPrefix(fn.Synthetic, "bound ") ||
			strings.HasPrefix(fn.Synthetic, "thunk ") ||
			strings.HasSuffix(fn.name, "Error") {
			// ok
		} else {
			s.errorf("nil Pkg")
		}
	}
	// A function is from source iff it is not synthetic, and iff it
	// has syntax; the two notions must agree.
	if src, syn := fn.Synthetic == "", fn.Syntax() != nil; src != syn {
		s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn)
	}
	for i, l := range fn.Locals {
		if l.Parent() != fn {
			s.errorf("Local %s at index %d has wrong parent", l.Name(), i)
		}
		if l.Heap {
			s.errorf("Local %s at index %d has Heap flag set", l.Name(), i)
		}
	}
	// Build the set of valid referrers.
	s.instrs = make(map[Instruction]struct{})
	for _, b := range fn.Blocks {
		for _, instr := range b.Instrs {
			s.instrs[instr] = struct{}{}
		}
	}
	for i, p := range fn.Params {
		if p.Parent() != fn {
			s.errorf("Param %s at index %d has wrong parent", p.Name(), i)
		}
		// Check common suffix of Signature and Params match type.
		if sig := fn.Signature; sig != nil {
			j := i - len(fn.Params) + sig.Params().Len() // index within sig.Params
			if j < 0 {
				continue
			}
			if !types.Identical(p.Type(), sig.Params().At(j).Type()) {
				s.errorf("Param %s at index %d has wrong type (%s, versus %s in Signature)", p.Name(), i, p.Type(), sig.Params().At(j).Type())

			}
		}
		s.checkReferrerList(p)
	}
	for i, fv := range fn.FreeVars {
		if fv.Parent() != fn {
			s.errorf("FreeVar %s at index %d has wrong parent", fv.Name(), i)
		}
		s.checkReferrerList(fv)
	}

	if fn.Blocks != nil && len(fn.Blocks) == 0 {
		// Function _had_ blocks (so it's not external) but
		// they were "optimized" away, even the entry block.
		s.errorf("Blocks slice is non-nil but empty")
	}
	for i, b := range fn.Blocks {
		if b == nil {
			s.warnf("nil *BasicBlock at f.Blocks[%d]", i)
			continue
		}
		s.checkBlock(b, i)
	}
	if fn.Recover != nil && fn.Blocks[fn.Recover.Index] != fn.Recover {
		s.errorf("Recover block is not in Blocks slice")
	}

	s.block = nil
	for i, anon := range fn.AnonFuncs {
		if anon.Parent() != fn {
			s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent())
		}
	}
	s.fn = nil
	return !s.insane
}
|
||||
|
||||
// sanityCheckPackage checks invariants of packages upon creation.
// It does not require that the package is built.
// Unlike sanityCheck (for functions), it just panics at the first error.
func sanityCheckPackage(pkg *Package) {
	if pkg.Pkg == nil {
		panic(fmt.Sprintf("Package %s has no Object", pkg))
	}
	_ = pkg.String() // must not crash

	for name, mem := range pkg.Members {
		if name != mem.Name() {
			panic(fmt.Sprintf("%s: %T.Name() = %s, want %s",
				pkg.Pkg.Path(), mem, mem.Name(), name))
		}
		obj := mem.Object()
		if obj == nil {
			// This check is sound because fields
			// {Global,Function}.object have type
			// types.Object.  (If they were declared as
			// *types.{Var,Func}, we'd have a non-empty
			// interface containing a nil pointer.)

			continue // not all members have typechecker objects
		}
		if obj.Name() != name {
			if obj.Name() == "init" && strings.HasPrefix(mem.Name(), "init#") {
				// Ok.  The name of a declared init function varies between
				// its types.Func ("init") and its ssa.Function ("init#%d").
			} else {
				panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s",
					pkg.Pkg.Path(), mem, obj.Name(), name))
			}
		}
		if obj.Pos() != mem.Pos() {
			panic(fmt.Sprintf("%s Pos=%d obj.Pos=%d", mem, mem.Pos(), obj.Pos()))
		}
	}
}
|
||||
293
vendor/golang.org/x/tools/go/ssa/source.go
generated
vendored
Normal file
293
vendor/golang.org/x/tools/go/ssa/source.go
generated
vendored
Normal file
@@ -0,0 +1,293 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines utilities for working with source positions
|
||||
// or source-level named entities ("objects").
|
||||
|
||||
// TODO(adonovan): test that {Value,Instruction}.Pos() positions match
|
||||
// the originating syntax, as specified.
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// EnclosingFunction returns the function that contains the syntax
// node denoted by path.
//
// Syntax associated with package-level variable specifications is
// enclosed by the package's init() function.
//
// Returns nil if not found; reasons might include:
//    - the node is not enclosed by any function.
//    - the node is within an anonymous function (FuncLit) and
//      its SSA function has not been created yet
//      (pkg.Build() has not yet been called).
//
func EnclosingFunction(pkg *Package, path []ast.Node) *Function {
	// Start with package-level function...
	fn := findEnclosingPackageLevelFunction(pkg, path)
	if fn == nil {
		return nil // not in any function
	}

	// ...then walk down the nested anonymous functions, matching each
	// enclosing FuncLit on path (outermost first) against fn.AnonFuncs
	// by source position.
	n := len(path)
outer:
	for i := range path {
		if lit, ok := path[n-1-i].(*ast.FuncLit); ok {
			for _, anon := range fn.AnonFuncs {
				if anon.Pos() == lit.Type.Func {
					fn = anon
					continue outer
				}
			}
			// SSA function not found:
			// - package not yet built, or maybe
			// - builder skipped FuncLit in dead block
			//   (in principle; but currently the Builder
			//   generates even dead FuncLits).
			return nil
		}
	}
	return fn
}
|
||||
|
||||
// HasEnclosingFunction returns true if the AST node denoted by path
// is contained within the declaration of some function or
// package-level variable.
//
// Unlike EnclosingFunction, the behaviour of this function does not
// depend on whether SSA code for pkg has been built, so it can be
// used to quickly reject check inputs that will cause
// EnclosingFunction to fail, prior to SSA building.
//
func HasEnclosingFunction(pkg *Package, path []ast.Node) bool {
	return findEnclosingPackageLevelFunction(pkg, path) != nil
}
|
||||
|
||||
// findEnclosingPackageLevelFunction returns the Function
// corresponding to the package-level function enclosing path.
//
func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function {
	// The decl enclosing the node is the second-to-last path element:
	// the last is always the *ast.File.
	if n := len(path); n >= 2 { // [... {Gen,Func}Decl File]
		switch decl := path[n-2].(type) {
		case *ast.GenDecl:
			if decl.Tok == token.VAR && n >= 3 {
				// Package-level 'var' initializer.
				return pkg.init
			}

		case *ast.FuncDecl:
			if decl.Recv == nil && decl.Name.Name == "init" {
				// Explicit init() function.
				// Find the corresponding anonymous init#%d function by
				// scanning pkg.init for the call whose callee has the
				// same position as this declaration.
				for _, b := range pkg.init.Blocks {
					for _, instr := range b.Instrs {
						if instr, ok := instr.(*Call); ok {
							if callee, ok := instr.Call.Value.(*Function); ok && callee.Pkg == pkg && callee.Pos() == decl.Name.NamePos {
								return callee
							}
						}
					}
				}
				// Hack: return non-nil when SSA is not yet
				// built so that HasEnclosingFunction works.
				return pkg.init
			}
			// Declared function/method.
			return findNamedFunc(pkg, decl.Name.NamePos)
		}
	}
	return nil // not in any function
}
|
||||
|
||||
// findNamedFunc returns the named function whose FuncDecl.Ident is at
// position pos.
//
func findNamedFunc(pkg *Package, pos token.Pos) *Function {
	// Look at all package members and method sets of named types.
	// Not very efficient.
	for _, mem := range pkg.Members {
		switch mem := mem.(type) {
		case *Function:
			// Package-level function declaration.
			if mem.Pos() == pos {
				return mem
			}
		case *Type:
			// Method declaration: search the pointer method set,
			// which includes both value and pointer receivers.
			mset := pkg.Prog.MethodSets.MethodSet(types.NewPointer(mem.Type()))
			for i, n := 0, mset.Len(); i < n; i++ {
				// Don't call Program.Method: avoid creating wrappers.
				obj := mset.At(i).Obj().(*types.Func)
				if obj.Pos() == pos {
					return pkg.values[obj].(*Function)
				}
			}
		}
	}
	return nil
}
|
||||
|
||||
// ValueForExpr returns the SSA Value that corresponds to non-constant
// expression e.
//
// It returns nil if no value was found, e.g.
//    - the expression is not lexically contained within f;
//    - f was not built with debug information; or
//    - e is a constant expression.  (For efficiency, no debug
//      information is stored for constants. Use
//      go/types.Info.Types[e].Value instead.)
//    - e is a reference to nil or a built-in function.
//    - the value was optimised away.
//
// If e is an addressable expression used in an lvalue context,
// value is the address denoted by e, and isAddr is true.
//
// The types of e (or &e, if isAddr) and the result are equal
// (modulo "untyped" bools resulting from comparisons).
//
// (Tip: to find the ssa.Value given a source position, use
// astutil.PathEnclosingInterval to locate the ast.Node, then
// EnclosingFunction to locate the Function, then ValueForExpr to find
// the ssa.Value.)
//
func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) {
	if f.debugInfo() { // (opt)
		// Linear scan of all DebugRef instructions for one whose
		// expression is e (after stripping parens).
		e = unparen(e)
		for _, b := range f.Blocks {
			for _, instr := range b.Instrs {
				if ref, ok := instr.(*DebugRef); ok {
					if ref.Expr == e {
						return ref.X, ref.IsAddr
					}
				}
			}
		}
	}
	return
}
|
||||
|
||||
// --- Lookup functions for source-level named entities (types.Objects) ---
|
||||
|
||||
// Package returns the SSA Package corresponding to the specified
// type-checker package object.
// It returns nil if no such SSA package has been created.
//
func (prog *Program) Package(obj *types.Package) *Package {
	return prog.packages[obj]
}
|
||||
|
||||
// packageLevelValue returns the package-level value corresponding to
|
||||
// the specified named object, which may be a package-level const
|
||||
// (*Const), var (*Global) or func (*Function) of some package in
|
||||
// prog. It returns nil if the object is not found.
|
||||
//
|
||||
func (prog *Program) packageLevelValue(obj types.Object) Value {
|
||||
if pkg, ok := prog.packages[obj.Pkg()]; ok {
|
||||
return pkg.values[obj]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FuncValue returns the concrete Function denoted by the source-level
|
||||
// named function obj, or nil if obj denotes an interface method.
|
||||
//
|
||||
// TODO(adonovan): check the invariant that obj.Type() matches the
|
||||
// result's Signature, both in the params/results and in the receiver.
|
||||
//
|
||||
func (prog *Program) FuncValue(obj *types.Func) *Function {
|
||||
fn, _ := prog.packageLevelValue(obj).(*Function)
|
||||
return fn
|
||||
}
|
||||
|
||||
// ConstValue returns the SSA Value denoted by the source-level named
// constant obj.
//
func (prog *Program) ConstValue(obj *types.Const) *Const {
	// TODO(adonovan): opt: share (don't reallocate)
	// Consts for const objects and constant ast.Exprs.

	// Universal constant? {true,false,nil}
	if obj.Parent() == types.Universe {
		return NewConst(obj.Val(), obj.Type())
	}
	// Package-level named constant?
	if v := prog.packageLevelValue(obj); v != nil {
		return v.(*Const)
	}
	// Local constant: synthesize a fresh Const from the object.
	return NewConst(obj.Val(), obj.Type())
}
|
||||
|
||||
// VarValue returns the SSA Value that corresponds to a specific
// identifier denoting the source-level named variable obj.
//
// VarValue returns nil if a local variable was not found, perhaps
// because its package was not built, the debug information was not
// requested during SSA construction, or the value was optimized away.
//
// ref is the path to an ast.Ident (e.g. from PathEnclosingInterval),
// and that ident must resolve to obj.
//
// pkg is the package enclosing the reference.  (A reference to a var
// always occurs within a function, so we need to know where to find it.)
//
// If the identifier is a field selector and its base expression is
// non-addressable, then VarValue returns the value of that field.
// For example:
//    func f() struct {x int}
//    f().x // VarValue(x) returns a *Field instruction of type int
//
// All other identifiers denote addressable locations (variables).
// For them, VarValue may return either the variable's address or its
// value, even when the expression is evaluated only for its value; the
// situation is reported by isAddr, the second component of the result.
//
// If !isAddr, the returned value is the one associated with the
// specific identifier.  For example,
//       var x int    // VarValue(x) returns Const 0 here
//       x = 1        // VarValue(x) returns Const 1 here
//
// It is not specified whether the value or the address is returned in
// any particular case, as it may depend upon optimizations performed
// during SSA code generation, such as registerization, constant
// folding, avoidance of materialization of subexpressions, etc.
//
func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) {
	// All references to a var are local to some function, possibly init.
	fn := EnclosingFunction(pkg, ref)
	if fn == nil {
		return // e.g. def of struct field; SSA not built?
	}

	id := ref[0].(*ast.Ident)

	// Defining ident of a parameter?
	if id.Pos() == obj.Pos() {
		for _, param := range fn.Params {
			if param.Object() == obj {
				return param, false
			}
		}
	}

	// Other ident?
	// Scan the function's DebugRef instructions for one recorded at
	// this identifier's position.
	for _, b := range fn.Blocks {
		for _, instr := range b.Instrs {
			if dr, ok := instr.(*DebugRef); ok {
				if dr.Pos() == id.Pos() {
					return dr.X, dr.IsAddr
				}
			}
		}
	}

	// Defining ident of package-level var?
	if v := prog.packageLevelValue(obj); v != nil {
		return v.(*Global), true
	}

	return // e.g. debug info not requested, or var optimized away
}
|
||||
1695
vendor/golang.org/x/tools/go/ssa/ssa.go
generated
vendored
Normal file
1695
vendor/golang.org/x/tools/go/ssa/ssa.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
175
vendor/golang.org/x/tools/go/ssa/ssautil/load.go
generated
vendored
Normal file
175
vendor/golang.org/x/tools/go/ssa/ssautil/load.go
generated
vendored
Normal file
@@ -0,0 +1,175 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssautil
|
||||
|
||||
// This file defines utility functions for constructing programs in SSA form.
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
|
||||
"golang.org/x/tools/go/loader"
|
||||
"golang.org/x/tools/go/packages"
|
||||
"golang.org/x/tools/go/ssa"
|
||||
)
|
||||
|
||||
// Packages creates an SSA program for a set of packages.
//
// The packages must have been loaded from source syntax using the
// golang.org/x/tools/go/packages.Load function in LoadSyntax or
// LoadAllSyntax mode.
//
// Packages creates an SSA package for each well-typed package in the
// initial list, plus all their dependencies. The resulting list of
// packages corresponds to the list of initial packages, and may contain
// a nil if SSA code could not be constructed for the corresponding initial
// package due to type errors.
//
// Code for bodies of functions is not built until Build is called on
// the resulting Program. SSA code is constructed only for the initial
// packages with well-typed syntax trees.
//
// The mode parameter controls diagnostics and checking during SSA construction.
//
func Packages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, []*ssa.Package) {
	// deps=false: dependency packages get declaration-only SSA packages
	// (no syntax), so only the initial packages receive function bodies.
	return doPackages(initial, mode, false)
}
|
||||
|
||||
// AllPackages creates an SSA program for a set of packages plus all
// their dependencies.
//
// The packages must have been loaded from source syntax using the
// golang.org/x/tools/go/packages.Load function in LoadAllSyntax mode.
//
// AllPackages creates an SSA package for each well-typed package in the
// initial list, plus all their dependencies. The resulting list of
// packages corresponds to the list of initial packages, and may contain
// a nil if SSA code could not be constructed for the corresponding
// initial package due to type errors.
//
// Code for bodies of functions is not built until Build is called on
// the resulting Program. SSA code is constructed for all packages with
// well-typed syntax trees.
//
// The mode parameter controls diagnostics and checking during SSA construction.
//
func AllPackages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, []*ssa.Package) {
	// deps=true: dependency packages also receive their syntax trees,
	// so SSA code can be built for the whole import graph.
	return doPackages(initial, mode, true)
}
|
||||
|
||||
func doPackages(initial []*packages.Package, mode ssa.BuilderMode, deps bool) (*ssa.Program, []*ssa.Package) {
|
||||
|
||||
var fset *token.FileSet
|
||||
if len(initial) > 0 {
|
||||
fset = initial[0].Fset
|
||||
}
|
||||
|
||||
prog := ssa.NewProgram(fset, mode)
|
||||
|
||||
isInitial := make(map[*packages.Package]bool, len(initial))
|
||||
for _, p := range initial {
|
||||
isInitial[p] = true
|
||||
}
|
||||
|
||||
ssamap := make(map[*packages.Package]*ssa.Package)
|
||||
packages.Visit(initial, nil, func(p *packages.Package) {
|
||||
if p.Types != nil && !p.IllTyped {
|
||||
var files []*ast.File
|
||||
if deps || isInitial[p] {
|
||||
files = p.Syntax
|
||||
}
|
||||
ssamap[p] = prog.CreatePackage(p.Types, files, p.TypesInfo, true)
|
||||
}
|
||||
})
|
||||
|
||||
var ssapkgs []*ssa.Package
|
||||
for _, p := range initial {
|
||||
ssapkgs = append(ssapkgs, ssamap[p]) // may be nil
|
||||
}
|
||||
return prog, ssapkgs
|
||||
}
|
||||
|
||||
// CreateProgram returns a new program in SSA form, given a program
|
||||
// loaded from source. An SSA package is created for each transitively
|
||||
// error-free package of lprog.
|
||||
//
|
||||
// Code for bodies of functions is not built until Build is called
|
||||
// on the result.
|
||||
//
|
||||
// The mode parameter controls diagnostics and checking during SSA construction.
|
||||
//
|
||||
// Deprecated: Use golang.org/x/tools/go/packages and the Packages
|
||||
// function instead; see ssa.ExampleLoadPackages.
|
||||
//
|
||||
func CreateProgram(lprog *loader.Program, mode ssa.BuilderMode) *ssa.Program {
|
||||
prog := ssa.NewProgram(lprog.Fset, mode)
|
||||
|
||||
for _, info := range lprog.AllPackages {
|
||||
if info.TransitivelyErrorFree {
|
||||
prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable)
|
||||
}
|
||||
}
|
||||
|
||||
return prog
|
||||
}
|
||||
|
||||
// BuildPackage builds an SSA program with IR for a single package.
|
||||
//
|
||||
// It populates pkg by type-checking the specified file ASTs. All
|
||||
// dependencies are loaded using the importer specified by tc, which
|
||||
// typically loads compiler export data; SSA code cannot be built for
|
||||
// those packages. BuildPackage then constructs an ssa.Program with all
|
||||
// dependency packages created, and builds and returns the SSA package
|
||||
// corresponding to pkg.
|
||||
//
|
||||
// The caller must have set pkg.Path() to the import path.
|
||||
//
|
||||
// The operation fails if there were any type-checking or import errors.
|
||||
//
|
||||
// See ../ssa/example_test.go for an example.
|
||||
//
|
||||
func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ssa.BuilderMode) (*ssa.Package, *types.Info, error) {
|
||||
if fset == nil {
|
||||
panic("no token.FileSet")
|
||||
}
|
||||
if pkg.Path() == "" {
|
||||
panic("package has no import path")
|
||||
}
|
||||
|
||||
info := &types.Info{
|
||||
Types: make(map[ast.Expr]types.TypeAndValue),
|
||||
Defs: make(map[*ast.Ident]types.Object),
|
||||
Uses: make(map[*ast.Ident]types.Object),
|
||||
Implicits: make(map[ast.Node]types.Object),
|
||||
Scopes: make(map[ast.Node]*types.Scope),
|
||||
Selections: make(map[*ast.SelectorExpr]*types.Selection),
|
||||
}
|
||||
if err := types.NewChecker(tc, fset, pkg, info).Files(files); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
prog := ssa.NewProgram(fset, mode)
|
||||
|
||||
// Create SSA packages for all imports.
|
||||
// Order is not significant.
|
||||
created := make(map[*types.Package]bool)
|
||||
var createAll func(pkgs []*types.Package)
|
||||
createAll = func(pkgs []*types.Package) {
|
||||
for _, p := range pkgs {
|
||||
if !created[p] {
|
||||
created[p] = true
|
||||
prog.CreatePackage(p, nil, nil, true)
|
||||
createAll(p.Imports())
|
||||
}
|
||||
}
|
||||
}
|
||||
createAll(pkg.Imports())
|
||||
|
||||
// Create and build the primary package.
|
||||
ssapkg := prog.CreatePackage(pkg, files, info, false)
|
||||
ssapkg.Build()
|
||||
return ssapkg, info, nil
|
||||
}
|
||||
234
vendor/golang.org/x/tools/go/ssa/ssautil/switch.go
generated
vendored
Normal file
234
vendor/golang.org/x/tools/go/ssa/ssautil/switch.go
generated
vendored
Normal file
@@ -0,0 +1,234 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssautil
|
||||
|
||||
// This file implements discovery of switch and type-switch constructs
|
||||
// from low-level control flow.
|
||||
//
|
||||
// Many techniques exist for compiling a high-level switch with
|
||||
// constant cases to efficient machine code. The optimal choice will
|
||||
// depend on the data type, the specific case values, the code in the
|
||||
// body of each case, and the hardware.
|
||||
// Some examples:
|
||||
// - a lookup table (for a switch that maps constants to constants)
|
||||
// - a computed goto
|
||||
// - a binary tree
|
||||
// - a perfect hash
|
||||
// - a two-level switch (to partition constant strings by their first byte).
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
|
||||
"golang.org/x/tools/go/ssa"
|
||||
)
|
||||
|
||||
// A ConstCase represents a single constant comparison.
// It is part of a Switch.
type ConstCase struct {
	Block *ssa.BasicBlock // block performing the comparison
	Body  *ssa.BasicBlock // body of the case
	Value *ssa.Const      // case comparand
}
|
||||
|
||||
// A TypeCase represents a single type assertion.
// It is part of a Switch.
type TypeCase struct {
	Block   *ssa.BasicBlock // block performing the type assert
	Body    *ssa.BasicBlock // body of the case
	Type    types.Type      // case type
	Binding ssa.Value       // value bound by this case
}
|
||||
|
||||
// A Switch is a logical high-level control flow operation
// (a multiway branch) discovered by analysis of a CFG containing
// only if/else chains. It is not part of the ssa.Instruction set.
//
// One of ConstCases and TypeCases has length >= 2;
// the other is nil.
//
// In a value switch, the list of cases may contain duplicate constants.
// A type switch may contain duplicate types, or types assignable
// to an interface type also in the list.
// TODO(adonovan): eliminate such duplicates.
//
type Switch struct {
	Start      *ssa.BasicBlock // block containing start of if/else chain
	X          ssa.Value       // the switch operand
	ConstCases []ConstCase     // ordered list of constant comparisons
	TypeCases  []TypeCase      // ordered list of type assertions
	Default    *ssa.BasicBlock // successor if all comparisons fail
}
|
||||
|
||||
func (sw *Switch) String() string {
|
||||
// We represent each block by the String() of its
|
||||
// first Instruction, e.g. "print(42:int)".
|
||||
var buf bytes.Buffer
|
||||
if sw.ConstCases != nil {
|
||||
fmt.Fprintf(&buf, "switch %s {\n", sw.X.Name())
|
||||
for _, c := range sw.ConstCases {
|
||||
fmt.Fprintf(&buf, "case %s: %s\n", c.Value, c.Body.Instrs[0])
|
||||
}
|
||||
} else {
|
||||
fmt.Fprintf(&buf, "switch %s.(type) {\n", sw.X.Name())
|
||||
for _, c := range sw.TypeCases {
|
||||
fmt.Fprintf(&buf, "case %s %s: %s\n",
|
||||
c.Binding.Name(), c.Type, c.Body.Instrs[0])
|
||||
}
|
||||
}
|
||||
if sw.Default != nil {
|
||||
fmt.Fprintf(&buf, "default: %s\n", sw.Default.Instrs[0])
|
||||
}
|
||||
fmt.Fprintf(&buf, "}")
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Switches examines the control-flow graph of fn and returns the
// set of inferred value and type switches. A value switch tests an
// ssa.Value for equality against two or more compile-time constant
// values. Switches involving link-time constants (addresses) are
// ignored. A type switch type-asserts an ssa.Value against two or
// more types.
//
// The switches are returned in dominance order.
//
// The resulting switches do not necessarily correspond to uses of the
// 'switch' keyword in the source: for example, a single source-level
// switch statement with non-constant cases may result in zero, one or
// many Switches, one per plural sequence of constant cases.
// Switches may even be inferred from if/else- or goto-based control flow.
// (In general, the control flow constructs of the source program
// cannot be faithfully reproduced from the SSA representation.)
//
func Switches(fn *ssa.Function) []Switch {
	// Traverse the CFG in dominance order, so we don't
	// enter an if/else-chain in the middle.
	var switches []Switch
	seen := make(map[*ssa.BasicBlock]bool) // TODO(adonovan): opt: use ssa.blockSet
	for _, b := range fn.DomPreorder() {
		if x, k := isComparisonBlock(b); x != nil {
			// Block b starts a switch.
			sw := Switch{Start: b, X: x}
			valueSwitch(&sw, k, seen)
			// Only report plural sequences of cases; a single
			// comparison is just an 'if'.
			if len(sw.ConstCases) > 1 {
				switches = append(switches, sw)
			}
		}

		if y, x, T := isTypeAssertBlock(b); y != nil {
			// Block b starts a type switch.
			sw := Switch{Start: b, X: x}
			typeSwitch(&sw, y, T, seen)
			if len(sw.TypeCases) > 1 {
				switches = append(switches, sw)
			}
		}
	}
	return switches
}
|
||||
|
||||
// valueSwitch extends sw with consecutive constant-comparison cases
// reachable from sw.Start along the "no match" (false) edge, stopping
// at the first block that cannot safely be treated as a case; that
// block becomes sw.Default.
func valueSwitch(sw *Switch, k *ssa.Const, seen map[*ssa.BasicBlock]bool) {
	b := sw.Start
	x := sw.X
	// Invariant: b ends with a comparison 'x == k' against sw.X.
	for x == sw.X {
		if seen[b] {
			break
		}
		seen[b] = true

		sw.ConstCases = append(sw.ConstCases, ConstCase{
			Block: b,
			Body:  b.Succs[0], // true edge: the case body
			Value: k,
		})
		b = b.Succs[1] // false edge: next comparison (or default)
		if len(b.Instrs) > 2 {
			// Block b contains not just 'if x == k',
			// so it may have side effects that
			// make it unsafe to elide.
			break
		}
		if len(b.Preds) != 1 {
			// Block b has multiple predecessors,
			// so it cannot be treated as a case.
			break
		}
		x, k = isComparisonBlock(b)
	}
	sw.Default = b
}
|
||||
|
||||
// typeSwitch extends sw with consecutive type-assertion cases
// reachable from sw.Start along the "no match" (false) edge, stopping
// at the first block that cannot safely be treated as a case; that
// block becomes sw.Default.
func typeSwitch(sw *Switch, y ssa.Value, T types.Type, seen map[*ssa.BasicBlock]bool) {
	b := sw.Start
	x := sw.X
	// Invariant: b ends with 'if y, ok := x.(T); ok' against sw.X.
	for x == sw.X {
		if seen[b] {
			break
		}
		seen[b] = true

		sw.TypeCases = append(sw.TypeCases, TypeCase{
			Block:   b,
			Body:    b.Succs[0], // true edge: the case body
			Type:    T,
			Binding: y,
		})
		b = b.Succs[1] // false edge: next assertion (or default)
		if len(b.Instrs) > 4 {
			// Block b contains not just
			//  {TypeAssert; Extract #0; Extract #1; If}
			// so it may have side effects that
			// make it unsafe to elide.
			break
		}
		if len(b.Preds) != 1 {
			// Block b has multiple predecessors,
			// so it cannot be treated as a case.
			break
		}
		y, x, T = isTypeAssertBlock(b)
	}
	sw.Default = b
}
|
||||
|
||||
// isComparisonBlock returns the operands (v, k) if a block ends with
|
||||
// a comparison v==k, where k is a compile-time constant.
|
||||
//
|
||||
func isComparisonBlock(b *ssa.BasicBlock) (v ssa.Value, k *ssa.Const) {
|
||||
if n := len(b.Instrs); n >= 2 {
|
||||
if i, ok := b.Instrs[n-1].(*ssa.If); ok {
|
||||
if binop, ok := i.Cond.(*ssa.BinOp); ok && binop.Block() == b && binop.Op == token.EQL {
|
||||
if k, ok := binop.Y.(*ssa.Const); ok {
|
||||
return binop.X, k
|
||||
}
|
||||
if k, ok := binop.X.(*ssa.Const); ok {
|
||||
return binop.Y, k
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// isTypeAssertBlock returns the operands (y, x, T) if a block ends with
|
||||
// a type assertion "if y, ok := x.(T); ok {".
|
||||
//
|
||||
func isTypeAssertBlock(b *ssa.BasicBlock) (y, x ssa.Value, T types.Type) {
|
||||
if n := len(b.Instrs); n >= 4 {
|
||||
if i, ok := b.Instrs[n-1].(*ssa.If); ok {
|
||||
if ext1, ok := i.Cond.(*ssa.Extract); ok && ext1.Block() == b && ext1.Index == 1 {
|
||||
if ta, ok := ext1.Tuple.(*ssa.TypeAssert); ok && ta.Block() == b {
|
||||
// hack: relies upon instruction ordering.
|
||||
if ext0, ok := b.Instrs[n-3].(*ssa.Extract); ok {
|
||||
return ext0, ta.X, ta.AssertedType
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
79
vendor/golang.org/x/tools/go/ssa/ssautil/visit.go
generated
vendored
Normal file
79
vendor/golang.org/x/tools/go/ssa/ssautil/visit.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssautil // import "golang.org/x/tools/go/ssa/ssautil"
|
||||
|
||||
import "golang.org/x/tools/go/ssa"
|
||||
|
||||
// This file defines utilities for visiting the SSA representation of
|
||||
// a Program.
|
||||
//
|
||||
// TODO(adonovan): test coverage.
|
||||
|
||||
// AllFunctions finds and returns the set of functions potentially
|
||||
// needed by program prog, as determined by a simple linker-style
|
||||
// reachability algorithm starting from the members and method-sets of
|
||||
// each package. The result may include anonymous functions and
|
||||
// synthetic wrappers.
|
||||
//
|
||||
// Precondition: all packages are built.
|
||||
//
|
||||
func AllFunctions(prog *ssa.Program) map[*ssa.Function]bool {
|
||||
visit := visitor{
|
||||
prog: prog,
|
||||
seen: make(map[*ssa.Function]bool),
|
||||
}
|
||||
visit.program()
|
||||
return visit.seen
|
||||
}
|
||||
|
||||
// visitor accumulates the set of functions transitively reachable
// from the roots enumerated by its program method.
type visitor struct {
	prog *ssa.Program
	seen map[*ssa.Function]bool // functions discovered so far
}
|
||||
|
||||
func (visit *visitor) program() {
|
||||
for _, pkg := range visit.prog.AllPackages() {
|
||||
for _, mem := range pkg.Members {
|
||||
if fn, ok := mem.(*ssa.Function); ok {
|
||||
visit.function(fn)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, T := range visit.prog.RuntimeTypes() {
|
||||
mset := visit.prog.MethodSets.MethodSet(T)
|
||||
for i, n := 0, mset.Len(); i < n; i++ {
|
||||
visit.function(visit.prog.MethodValue(mset.At(i)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (visit *visitor) function(fn *ssa.Function) {
|
||||
if !visit.seen[fn] {
|
||||
visit.seen[fn] = true
|
||||
var buf [10]*ssa.Value // avoid alloc in common case
|
||||
for _, b := range fn.Blocks {
|
||||
for _, instr := range b.Instrs {
|
||||
for _, op := range instr.Operands(buf[:0]) {
|
||||
if fn, ok := (*op).(*ssa.Function); ok {
|
||||
visit.function(fn)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// MainPackages returns the subset of the specified packages
|
||||
// named "main" that define a main function.
|
||||
// The result may include synthetic "testmain" packages.
|
||||
func MainPackages(pkgs []*ssa.Package) []*ssa.Package {
|
||||
var mains []*ssa.Package
|
||||
for _, pkg := range pkgs {
|
||||
if pkg.Pkg.Name() == "main" && pkg.Func("main") != nil {
|
||||
mains = append(mains, pkg)
|
||||
}
|
||||
}
|
||||
return mains
|
||||
}
|
||||
273
vendor/golang.org/x/tools/go/ssa/testmain.go
generated
vendored
Normal file
273
vendor/golang.org/x/tools/go/ssa/testmain.go
generated
vendored
Normal file
@@ -0,0 +1,273 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// CreateTestMainPackage synthesizes a main package that runs all the
|
||||
// tests of the supplied packages.
|
||||
// It is closely coupled to $GOROOT/src/cmd/go/test.go and $GOROOT/src/testing.
|
||||
//
|
||||
// TODO(adonovan): throws this all away now that x/tools/go/packages
|
||||
// provides access to the actual synthetic test main files.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// FindTests returns the Test, Benchmark, and Example functions
// (as defined by "go test") defined in the specified package,
// and its TestMain function, if any.
//
// Deprecated: Use golang.org/x/tools/go/packages to access synthetic
// testmain packages.
func FindTests(pkg *Package) (tests, benchmarks, examples []*Function, main *Function) {
	prog := pkg.Prog

	// The first two of these may be nil: if the program doesn't import "testing",
	// it can't contain any tests, but it may yet contain Examples.
	var testSig *types.Signature                              // func(*testing.T)
	var benchmarkSig *types.Signature                         // func(*testing.B)
	var exampleSig = types.NewSignature(nil, nil, nil, false) // func()

	// Obtain the types from the parameters of testing.MainStart.
	if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil {
		mainStart := testingPkg.Func("MainStart")
		params := mainStart.Signature.Params()
		testSig = funcField(params.At(1).Type())
		benchmarkSig = funcField(params.At(2).Type())

		// Does the package define this function?
		//   func TestMain(*testing.M)
		if f := pkg.Func("TestMain"); f != nil {
			sig := f.Type().(*types.Signature)
			starM := mainStart.Signature.Results().At(0).Type() // *testing.M
			if sig.Results().Len() == 0 &&
				sig.Params().Len() == 1 &&
				types.Identical(sig.Params().At(0).Type(), starM) {
				main = f
			}
		}
	}

	// Classify each exported function declared in a _test.go file.
	// TODO(adonovan): use a stable order, e.g. lexical.
	for _, mem := range pkg.Members {
		if f, ok := mem.(*Function); ok &&
			ast.IsExported(f.Name()) &&
			strings.HasSuffix(prog.Fset.Position(f.Pos()).Filename, "_test.go") {

			switch {
			case testSig != nil && isTestSig(f, "Test", testSig):
				tests = append(tests, f)
			case benchmarkSig != nil && isTestSig(f, "Benchmark", benchmarkSig):
				benchmarks = append(benchmarks, f)
			case isTestSig(f, "Example", exampleSig):
				examples = append(examples, f)
			default:
				continue
			}
		}
	}
	return
}
|
||||
|
||||
// Like isTest, but checks the signature too.
|
||||
func isTestSig(f *Function, prefix string, sig *types.Signature) bool {
|
||||
return isTest(f.Name(), prefix) && types.Identical(f.Signature, sig)
|
||||
}
|
||||
|
||||
// Given the type of one of the three slice parameters of testing.Main,
|
||||
// returns the function type.
|
||||
func funcField(slice types.Type) *types.Signature {
|
||||
return slice.(*types.Slice).Elem().Underlying().(*types.Struct).Field(1).Type().(*types.Signature)
|
||||
}
|
||||
|
||||
// isTest tells whether name looks like a test (or benchmark, according to prefix).
// It is a Test (say) if there is a character after Test that is not a lower-case letter.
// We don't want TesticularCancer.
// Plundered from $GOROOT/src/cmd/go/test.go
func isTest(name, prefix string) bool {
	if !strings.HasPrefix(name, prefix) {
		return false
	}
	rest := name[len(prefix):]
	// "Test" alone is ok; otherwise the remainder must look exported.
	return rest == "" || ast.IsExported(rest)
}
|
||||
|
||||
// CreateTestMainPackage creates and returns a synthetic "testmain"
// package for the specified package if it defines tests, benchmarks or
// executable examples, or nil otherwise. The new package is named
// "main" and provides a function named "main" that runs the tests,
// similar to the one that would be created by the 'go test' tool.
//
// Subsequent calls to prog.AllPackages include the new package.
// The package pkg must belong to the program prog.
//
// Deprecated: Use golang.org/x/tools/go/packages to access synthetic
// testmain packages.
func (prog *Program) CreateTestMainPackage(pkg *Package) *Package {
	if pkg.Prog != prog {
		log.Fatal("Package does not belong to Program")
	}

	// Template data
	var data struct {
		Pkg                         *Package
		Tests, Benchmarks, Examples []*Function
		Main                        *Function
		Go18                        bool
	}
	data.Pkg = pkg

	// Enumerate tests.
	data.Tests, data.Benchmarks, data.Examples, data.Main = FindTests(pkg)
	if data.Main == nil &&
		data.Tests == nil && data.Benchmarks == nil && data.Examples == nil {
		return nil // nothing to run; no testmain needed
	}

	// Synthesize source for testmain package.
	path := pkg.Pkg.Path() + "$testmain"
	tmpl := testmainTmpl
	if testingPkg := prog.ImportedPackage("testing"); testingPkg != nil {
		// In Go 1.8, testing.MainStart's first argument is an interface, not a func.
		data.Go18 = types.IsInterface(testingPkg.Func("MainStart").Signature.Params().At(0).Type())
	} else {
		// The program does not import "testing", but FindTests
		// returned non-nil, which must mean there were Examples
		// but no Test, Benchmark, or TestMain functions.

		// We'll simply call them from testmain.main; this will
		// ensure they don't panic, but will not check any
		// "Output:" comments.
		// (We should not execute an Example that has no
		// "Output:" comment, but it's impossible to tell here.)
		tmpl = examplesOnlyTmpl
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, data); err != nil {
		log.Fatalf("internal error expanding template for %s: %v", path, err)
	}
	if false { // debugging: dump the synthesized source
		fmt.Fprintln(os.Stderr, buf.String())
	}

	// Parse and type-check the testmain package.
	f, err := parser.ParseFile(prog.Fset, path+".go", &buf, parser.Mode(0))
	if err != nil {
		log.Fatalf("internal error parsing %s: %v", path, err)
	}
	conf := types.Config{
		DisableUnusedImportCheck: true,
		// importer resolves imports against the already-loaded program.
		Importer: importer{pkg},
	}
	files := []*ast.File{f}
	info := &types.Info{
		Types:      make(map[ast.Expr]types.TypeAndValue),
		Defs:       make(map[*ast.Ident]types.Object),
		Uses:       make(map[*ast.Ident]types.Object),
		Implicits:  make(map[ast.Node]types.Object),
		Scopes:     make(map[ast.Node]*types.Scope),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	testmainPkg, err := conf.Check(path, prog.Fset, files, info)
	if err != nil {
		log.Fatalf("internal error type-checking %s: %v", path, err)
	}

	// Create and build SSA code.
	testmain := prog.CreatePackage(testmainPkg, files, info, false)
	testmain.SetDebugMode(false)
	testmain.Build()
	testmain.Func("main").Synthetic = "test main function"
	testmain.Func("init").Synthetic = "package initializer"
	return testmain
}
|
||||
|
||||
// An implementation of types.Importer for an already loaded SSA program.
type importer struct {
	pkg *Package // package under test; may be non-importable
}
|
||||
|
||||
func (imp importer) Import(path string) (*types.Package, error) {
|
||||
if p := imp.pkg.Prog.ImportedPackage(path); p != nil {
|
||||
return p.Pkg, nil
|
||||
}
|
||||
if path == imp.pkg.Pkg.Path() {
|
||||
return imp.pkg.Pkg, nil
|
||||
}
|
||||
return nil, fmt.Errorf("not found") // can't happen
|
||||
}
|
||||
|
||||
// testmainTmpl generates the synthetic main package when the program
// imports "testing": it registers the discovered tests, benchmarks and
// examples with testing.MainStart, then either calls TestMain (if the
// package defines one) or m.Run. The .Go18 field selects the
// interface-based MainStart calling convention introduced in Go 1.8.
var testmainTmpl = template.Must(template.New("testmain").Parse(`
package main

import "io"
import "os"
import "testing"
import p {{printf "%q" .Pkg.Pkg.Path}}

{{if .Go18}}
type deps struct{}

func (deps) ImportPath() string { return "" }
func (deps) MatchString(pat, str string) (bool, error) { return true, nil }
func (deps) StartCPUProfile(io.Writer) error { return nil }
func (deps) StartTestLog(io.Writer) {}
func (deps) StopCPUProfile() {}
func (deps) StopTestLog() error { return nil }
func (deps) WriteHeapProfile(io.Writer) error { return nil }
func (deps) WriteProfileTo(string, io.Writer, int) error { return nil }

var match deps
{{else}}
func match(_, _ string) (bool, error) { return true, nil }
{{end}}

func main() {
	tests := []testing.InternalTest{
{{range .Tests}}
		{ {{printf "%q" .Name}}, p.{{.Name}} },
{{end}}
	}
	benchmarks := []testing.InternalBenchmark{
{{range .Benchmarks}}
		{ {{printf "%q" .Name}}, p.{{.Name}} },
{{end}}
	}
	examples := []testing.InternalExample{
{{range .Examples}}
		{Name: {{printf "%q" .Name}}, F: p.{{.Name}}},
{{end}}
	}
	m := testing.MainStart(match, tests, benchmarks, examples)
{{with .Main}}
	p.{{.Name}}(m)
{{else}}
	os.Exit(m.Run())
{{end}}
}

`))
|
||||
|
||||
// examplesOnlyTmpl generates the synthetic main package when the
// program does not import "testing" (so it can contain only Examples):
// main simply calls each Example in turn, without checking output.
var examplesOnlyTmpl = template.Must(template.New("examples").Parse(`
package main

import p {{printf "%q" .Pkg.Pkg.Path}}

func main() {
{{range .Examples}}
	p.{{.Name}}()
{{end}}
}
`))
|
||||
119
vendor/golang.org/x/tools/go/ssa/util.go
generated
vendored
Normal file
119
vendor/golang.org/x/tools/go/ssa/util.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines a number of miscellaneous utility functions.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
)
|
||||
|
||||
//// AST utilities
|
||||
|
||||
// unparen returns e with any enclosing parentheses stripped.
func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) }
|
||||
|
||||
// isBlankIdent returns true iff e is an Ident with name "_".
|
||||
// They have no associated types.Object, and thus no type.
|
||||
//
|
||||
func isBlankIdent(e ast.Expr) bool {
|
||||
id, ok := e.(*ast.Ident)
|
||||
return ok && id.Name == "_"
|
||||
}
|
||||
|
||||
//// Type utilities. Some of these belong in go/types.
|
||||
|
||||
// isPointer returns true for types whose underlying type is a pointer.
|
||||
func isPointer(typ types.Type) bool {
|
||||
_, ok := typ.Underlying().(*types.Pointer)
|
||||
return ok
|
||||
}
|
||||
|
||||
func isInterface(T types.Type) bool { return types.IsInterface(T) }
|
||||
|
||||
// deref returns a pointer's element type; otherwise it returns typ.
|
||||
func deref(typ types.Type) types.Type {
|
||||
if p, ok := typ.Underlying().(*types.Pointer); ok {
|
||||
return p.Elem()
|
||||
}
|
||||
return typ
|
||||
}
|
||||
|
||||
// recvType returns the receiver type of method obj.
|
||||
func recvType(obj *types.Func) types.Type {
|
||||
return obj.Type().(*types.Signature).Recv().Type()
|
||||
}
|
||||
|
||||
// DefaultType returns the default "typed" type for an "untyped" type;
|
||||
// it returns the incoming type for all other types. The default type
|
||||
// for untyped nil is untyped nil.
|
||||
//
|
||||
// Exported to ssa/interp.
|
||||
//
|
||||
// TODO(adonovan): use go/types.DefaultType after 1.8.
|
||||
//
|
||||
func DefaultType(typ types.Type) types.Type {
|
||||
if t, ok := typ.(*types.Basic); ok {
|
||||
k := t.Kind()
|
||||
switch k {
|
||||
case types.UntypedBool:
|
||||
k = types.Bool
|
||||
case types.UntypedInt:
|
||||
k = types.Int
|
||||
case types.UntypedRune:
|
||||
k = types.Rune
|
||||
case types.UntypedFloat:
|
||||
k = types.Float64
|
||||
case types.UntypedComplex:
|
||||
k = types.Complex128
|
||||
case types.UntypedString:
|
||||
k = types.String
|
||||
}
|
||||
typ = types.Typ[k]
|
||||
}
|
||||
return typ
|
||||
}
|
||||
|
||||
// logStack prints the formatted "start" message to stderr and
|
||||
// returns a closure that prints the corresponding "end" message.
|
||||
// Call using 'defer logStack(...)()' to show builder stack on panic.
|
||||
// Don't forget trailing parens!
|
||||
//
|
||||
func logStack(format string, args ...interface{}) func() {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
io.WriteString(os.Stderr, msg)
|
||||
io.WriteString(os.Stderr, "\n")
|
||||
return func() {
|
||||
io.WriteString(os.Stderr, msg)
|
||||
io.WriteString(os.Stderr, " end\n")
|
||||
}
|
||||
}
|
||||
|
||||
// newVar creates a 'var' for use in a types.Tuple.
|
||||
func newVar(name string, typ types.Type) *types.Var {
|
||||
return types.NewParam(token.NoPos, nil, name, typ)
|
||||
}
|
||||
|
||||
// anonVar creates an anonymous 'var' for use in a types.Tuple.
|
||||
func anonVar(typ types.Type) *types.Var {
|
||||
return newVar("", typ)
|
||||
}
|
||||
|
||||
var lenResults = types.NewTuple(anonVar(tInt))
|
||||
|
||||
// makeLen returns the len builtin specialized to type func(T)int.
|
||||
func makeLen(T types.Type) *Builtin {
|
||||
lenParams := types.NewTuple(anonVar(T))
|
||||
return &Builtin{
|
||||
name: "len",
|
||||
sig: types.NewSignature(nil, lenParams, lenResults, false),
|
||||
}
|
||||
}
|
||||
290
vendor/golang.org/x/tools/go/ssa/wrappers.go
generated
vendored
Normal file
290
vendor/golang.org/x/tools/go/ssa/wrappers.go
generated
vendored
Normal file
@@ -0,0 +1,290 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ssa
|
||||
|
||||
// This file defines synthesis of Functions that delegate to declared
|
||||
// methods; they come in three kinds:
|
||||
//
|
||||
// (1) wrappers: methods that wrap declared methods, performing
|
||||
// implicit pointer indirections and embedded field selections.
|
||||
//
|
||||
// (2) thunks: funcs that wrap declared methods. Like wrappers,
|
||||
// thunks perform indirections and field selections. The thunk's
|
||||
// first parameter is used as the receiver for the method call.
|
||||
//
|
||||
// (3) bounds: funcs that wrap declared methods. The bound's sole
|
||||
// free variable, supplied by a closure, is used as the receiver
|
||||
// for the method call. No indirections or field selections are
|
||||
// performed since they can be done before the call.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"go/types"
|
||||
)
|
||||
|
||||
// -- wrappers -----------------------------------------------------------
|
||||
|
||||
// makeWrapper returns a synthetic method that delegates to the
|
||||
// declared method denoted by meth.Obj(), first performing any
|
||||
// necessary pointer indirections or field selections implied by meth.
|
||||
//
|
||||
// The resulting method's receiver type is meth.Recv().
|
||||
//
|
||||
// This function is versatile but quite subtle! Consider the
|
||||
// following axes of variation when making changes:
|
||||
// - optional receiver indirection
|
||||
// - optional implicit field selections
|
||||
// - meth.Obj() may denote a concrete or an interface method
|
||||
// - the result may be a thunk or a wrapper.
|
||||
//
|
||||
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
|
||||
//
|
||||
func makeWrapper(prog *Program, sel *types.Selection) *Function {
|
||||
obj := sel.Obj().(*types.Func) // the declared function
|
||||
sig := sel.Type().(*types.Signature) // type of this wrapper
|
||||
|
||||
var recv *types.Var // wrapper's receiver or thunk's params[0]
|
||||
name := obj.Name()
|
||||
var description string
|
||||
var start int // first regular param
|
||||
if sel.Kind() == types.MethodExpr {
|
||||
name += "$thunk"
|
||||
description = "thunk"
|
||||
recv = sig.Params().At(0)
|
||||
start = 1
|
||||
} else {
|
||||
description = "wrapper"
|
||||
recv = sig.Recv()
|
||||
}
|
||||
|
||||
description = fmt.Sprintf("%s for %s", description, sel.Obj())
|
||||
if prog.mode&LogSource != 0 {
|
||||
defer logStack("make %s to (%s)", description, recv.Type())()
|
||||
}
|
||||
fn := &Function{
|
||||
name: name,
|
||||
method: sel,
|
||||
object: obj,
|
||||
Signature: sig,
|
||||
Synthetic: description,
|
||||
Prog: prog,
|
||||
pos: obj.Pos(),
|
||||
}
|
||||
fn.startBody()
|
||||
fn.addSpilledParam(recv)
|
||||
createParams(fn, start)
|
||||
|
||||
indices := sel.Index()
|
||||
|
||||
var v Value = fn.Locals[0] // spilled receiver
|
||||
if isPointer(sel.Recv()) {
|
||||
v = emitLoad(fn, v)
|
||||
|
||||
// For simple indirection wrappers, perform an informative nil-check:
|
||||
// "value method (T).f called using nil *T pointer"
|
||||
if len(indices) == 1 && !isPointer(recvType(obj)) {
|
||||
var c Call
|
||||
c.Call.Value = &Builtin{
|
||||
name: "ssa:wrapnilchk",
|
||||
sig: types.NewSignature(nil,
|
||||
types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)),
|
||||
types.NewTuple(anonVar(sel.Recv())), false),
|
||||
}
|
||||
c.Call.Args = []Value{
|
||||
v,
|
||||
stringConst(deref(sel.Recv()).String()),
|
||||
stringConst(sel.Obj().Name()),
|
||||
}
|
||||
c.setType(v.Type())
|
||||
v = fn.emit(&c)
|
||||
}
|
||||
}
|
||||
|
||||
// Invariant: v is a pointer, either
|
||||
// value of *A receiver param, or
|
||||
// address of A spilled receiver.
|
||||
|
||||
// We use pointer arithmetic (FieldAddr possibly followed by
|
||||
// Load) in preference to value extraction (Field possibly
|
||||
// preceded by Load).
|
||||
|
||||
v = emitImplicitSelections(fn, v, indices[:len(indices)-1])
|
||||
|
||||
// Invariant: v is a pointer, either
|
||||
// value of implicit *C field, or
|
||||
// address of implicit C field.
|
||||
|
||||
var c Call
|
||||
if r := recvType(obj); !isInterface(r) { // concrete method
|
||||
if !isPointer(r) {
|
||||
v = emitLoad(fn, v)
|
||||
}
|
||||
c.Call.Value = prog.declaredFunc(obj)
|
||||
c.Call.Args = append(c.Call.Args, v)
|
||||
} else {
|
||||
c.Call.Method = obj
|
||||
c.Call.Value = emitLoad(fn, v)
|
||||
}
|
||||
for _, arg := range fn.Params[1:] {
|
||||
c.Call.Args = append(c.Call.Args, arg)
|
||||
}
|
||||
emitTailCall(fn, &c)
|
||||
fn.finishBody()
|
||||
return fn
|
||||
}
|
||||
|
||||
// createParams creates parameters for wrapper method fn based on its
|
||||
// Signature.Params, which do not include the receiver.
|
||||
// start is the index of the first regular parameter to use.
|
||||
//
|
||||
func createParams(fn *Function, start int) {
|
||||
tparams := fn.Signature.Params()
|
||||
for i, n := start, tparams.Len(); i < n; i++ {
|
||||
fn.addParamObj(tparams.At(i))
|
||||
}
|
||||
}
|
||||
|
||||
// -- bounds -----------------------------------------------------------
|
||||
|
||||
// makeBound returns a bound method wrapper (or "bound"), a synthetic
|
||||
// function that delegates to a concrete or interface method denoted
|
||||
// by obj. The resulting function has no receiver, but has one free
|
||||
// variable which will be used as the method's receiver in the
|
||||
// tail-call.
|
||||
//
|
||||
// Use MakeClosure with such a wrapper to construct a bound method
|
||||
// closure. e.g.:
|
||||
//
|
||||
// type T int or: type T interface { meth() }
|
||||
// func (t T) meth()
|
||||
// var t T
|
||||
// f := t.meth
|
||||
// f() // calls t.meth()
|
||||
//
|
||||
// f is a closure of a synthetic wrapper defined as if by:
|
||||
//
|
||||
// f := func() { return t.meth() }
|
||||
//
|
||||
// Unlike makeWrapper, makeBound need perform no indirection or field
|
||||
// selections because that can be done before the closure is
|
||||
// constructed.
|
||||
//
|
||||
// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
|
||||
//
|
||||
func makeBound(prog *Program, obj *types.Func) *Function {
|
||||
prog.methodsMu.Lock()
|
||||
defer prog.methodsMu.Unlock()
|
||||
fn, ok := prog.bounds[obj]
|
||||
if !ok {
|
||||
description := fmt.Sprintf("bound method wrapper for %s", obj)
|
||||
if prog.mode&LogSource != 0 {
|
||||
defer logStack("%s", description)()
|
||||
}
|
||||
fn = &Function{
|
||||
name: obj.Name() + "$bound",
|
||||
object: obj,
|
||||
Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver
|
||||
Synthetic: description,
|
||||
Prog: prog,
|
||||
pos: obj.Pos(),
|
||||
}
|
||||
|
||||
fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn}
|
||||
fn.FreeVars = []*FreeVar{fv}
|
||||
fn.startBody()
|
||||
createParams(fn, 0)
|
||||
var c Call
|
||||
|
||||
if !isInterface(recvType(obj)) { // concrete
|
||||
c.Call.Value = prog.declaredFunc(obj)
|
||||
c.Call.Args = []Value{fv}
|
||||
} else {
|
||||
c.Call.Value = fv
|
||||
c.Call.Method = obj
|
||||
}
|
||||
for _, arg := range fn.Params {
|
||||
c.Call.Args = append(c.Call.Args, arg)
|
||||
}
|
||||
emitTailCall(fn, &c)
|
||||
fn.finishBody()
|
||||
|
||||
prog.bounds[obj] = fn
|
||||
}
|
||||
return fn
|
||||
}
|
||||
|
||||
// -- thunks -----------------------------------------------------------
|
||||
|
||||
// makeThunk returns a thunk, a synthetic function that delegates to a
|
||||
// concrete or interface method denoted by sel.Obj(). The resulting
|
||||
// function has no receiver, but has an additional (first) regular
|
||||
// parameter.
|
||||
//
|
||||
// Precondition: sel.Kind() == types.MethodExpr.
|
||||
//
|
||||
// type T int or: type T interface { meth() }
|
||||
// func (t T) meth()
|
||||
// f := T.meth
|
||||
// var t T
|
||||
// f(t) // calls t.meth()
|
||||
//
|
||||
// f is a synthetic wrapper defined as if by:
|
||||
//
|
||||
// f := func(t T) { return t.meth() }
|
||||
//
|
||||
// TODO(adonovan): opt: currently the stub is created even when used
|
||||
// directly in a function call: C.f(i, 0). This is less efficient
|
||||
// than inlining the stub.
|
||||
//
|
||||
// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
|
||||
//
|
||||
func makeThunk(prog *Program, sel *types.Selection) *Function {
|
||||
if sel.Kind() != types.MethodExpr {
|
||||
panic(sel)
|
||||
}
|
||||
|
||||
key := selectionKey{
|
||||
kind: sel.Kind(),
|
||||
recv: sel.Recv(),
|
||||
obj: sel.Obj(),
|
||||
index: fmt.Sprint(sel.Index()),
|
||||
indirect: sel.Indirect(),
|
||||
}
|
||||
|
||||
prog.methodsMu.Lock()
|
||||
defer prog.methodsMu.Unlock()
|
||||
|
||||
// Canonicalize key.recv to avoid constructing duplicate thunks.
|
||||
canonRecv, ok := prog.canon.At(key.recv).(types.Type)
|
||||
if !ok {
|
||||
canonRecv = key.recv
|
||||
prog.canon.Set(key.recv, canonRecv)
|
||||
}
|
||||
key.recv = canonRecv
|
||||
|
||||
fn, ok := prog.thunks[key]
|
||||
if !ok {
|
||||
fn = makeWrapper(prog, sel)
|
||||
if fn.Signature.Recv() != nil {
|
||||
panic(fn) // unexpected receiver
|
||||
}
|
||||
prog.thunks[key] = fn
|
||||
}
|
||||
return fn
|
||||
}
|
||||
|
||||
func changeRecv(s *types.Signature, recv *types.Var) *types.Signature {
|
||||
return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic())
|
||||
}
|
||||
|
||||
// selectionKey is like types.Selection but a usable map key.
|
||||
type selectionKey struct {
|
||||
kind types.SelectionKind
|
||||
recv types.Type // canonicalized via Program.canon
|
||||
obj types.Object
|
||||
index string
|
||||
indirect bool
|
||||
}
|
||||
46
vendor/golang.org/x/tools/go/types/typeutil/callee.go
generated
vendored
Normal file
46
vendor/golang.org/x/tools/go/types/typeutil/callee.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/types"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
)
|
||||
|
||||
// Callee returns the named target of a function call, if any:
|
||||
// a function, method, builtin, or variable.
|
||||
func Callee(info *types.Info, call *ast.CallExpr) types.Object {
|
||||
var obj types.Object
|
||||
switch fun := astutil.Unparen(call.Fun).(type) {
|
||||
case *ast.Ident:
|
||||
obj = info.Uses[fun] // type, var, builtin, or declared func
|
||||
case *ast.SelectorExpr:
|
||||
if sel, ok := info.Selections[fun]; ok {
|
||||
obj = sel.Obj() // method or field
|
||||
} else {
|
||||
obj = info.Uses[fun.Sel] // qualified identifier?
|
||||
}
|
||||
}
|
||||
if _, ok := obj.(*types.TypeName); ok {
|
||||
return nil // T(x) is a conversion, not a call
|
||||
}
|
||||
return obj
|
||||
}
|
||||
|
||||
// StaticCallee returns the target (function or method) of a static
|
||||
// function call, if any. It returns nil for calls to builtins.
|
||||
func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
|
||||
if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
|
||||
return f
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func interfaceMethod(f *types.Func) bool {
|
||||
recv := f.Type().(*types.Signature).Recv()
|
||||
return recv != nil && types.IsInterface(recv.Type())
|
||||
}
|
||||
31
vendor/golang.org/x/tools/go/types/typeutil/imports.go
generated
vendored
Normal file
31
vendor/golang.org/x/tools/go/types/typeutil/imports.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeutil
|
||||
|
||||
import "go/types"
|
||||
|
||||
// Dependencies returns all dependencies of the specified packages.
|
||||
//
|
||||
// Dependent packages appear in topological order: if package P imports
|
||||
// package Q, Q appears earlier than P in the result.
|
||||
// The algorithm follows import statements in the order they
|
||||
// appear in the source code, so the result is a total order.
|
||||
//
|
||||
func Dependencies(pkgs ...*types.Package) []*types.Package {
|
||||
var result []*types.Package
|
||||
seen := make(map[*types.Package]bool)
|
||||
var visit func(pkgs []*types.Package)
|
||||
visit = func(pkgs []*types.Package) {
|
||||
for _, p := range pkgs {
|
||||
if !seen[p] {
|
||||
seen[p] = true
|
||||
visit(p.Imports())
|
||||
result = append(result, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
visit(pkgs)
|
||||
return result
|
||||
}
|
||||
313
vendor/golang.org/x/tools/go/types/typeutil/map.go
generated
vendored
Normal file
313
vendor/golang.org/x/tools/go/types/typeutil/map.go
generated
vendored
Normal file
@@ -0,0 +1,313 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package typeutil defines various utilities for types, such as Map,
|
||||
// a mapping from types.Type to interface{} values.
|
||||
package typeutil // import "golang.org/x/tools/go/types/typeutil"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// Map is a hash-table-based mapping from types (types.Type) to
|
||||
// arbitrary interface{} values. The concrete types that implement
|
||||
// the Type interface are pointers. Since they are not canonicalized,
|
||||
// == cannot be used to check for equivalence, and thus we cannot
|
||||
// simply use a Go map.
|
||||
//
|
||||
// Just as with map[K]V, a nil *Map is a valid empty map.
|
||||
//
|
||||
// Not thread-safe.
|
||||
//
|
||||
type Map struct {
|
||||
hasher Hasher // shared by many Maps
|
||||
table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
|
||||
length int // number of map entries
|
||||
}
|
||||
|
||||
// entry is an entry (key/value association) in a hash bucket.
|
||||
type entry struct {
|
||||
key types.Type
|
||||
value interface{}
|
||||
}
|
||||
|
||||
// SetHasher sets the hasher used by Map.
|
||||
//
|
||||
// All Hashers are functionally equivalent but contain internal state
|
||||
// used to cache the results of hashing previously seen types.
|
||||
//
|
||||
// A single Hasher created by MakeHasher() may be shared among many
|
||||
// Maps. This is recommended if the instances have many keys in
|
||||
// common, as it will amortize the cost of hash computation.
|
||||
//
|
||||
// A Hasher may grow without bound as new types are seen. Even when a
|
||||
// type is deleted from the map, the Hasher never shrinks, since other
|
||||
// types in the map may reference the deleted type indirectly.
|
||||
//
|
||||
// Hashers are not thread-safe, and read-only operations such as
|
||||
// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
|
||||
// read-lock) is require around all Map operations if a shared
|
||||
// hasher is accessed from multiple threads.
|
||||
//
|
||||
// If SetHasher is not called, the Map will create a private hasher at
|
||||
// the first call to Insert.
|
||||
//
|
||||
func (m *Map) SetHasher(hasher Hasher) {
|
||||
m.hasher = hasher
|
||||
}
|
||||
|
||||
// Delete removes the entry with the given key, if any.
|
||||
// It returns true if the entry was found.
|
||||
//
|
||||
func (m *Map) Delete(key types.Type) bool {
|
||||
if m != nil && m.table != nil {
|
||||
hash := m.hasher.Hash(key)
|
||||
bucket := m.table[hash]
|
||||
for i, e := range bucket {
|
||||
if e.key != nil && types.Identical(key, e.key) {
|
||||
// We can't compact the bucket as it
|
||||
// would disturb iterators.
|
||||
bucket[i] = entry{}
|
||||
m.length--
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// At returns the map entry for the given key.
|
||||
// The result is nil if the entry is not present.
|
||||
//
|
||||
func (m *Map) At(key types.Type) interface{} {
|
||||
if m != nil && m.table != nil {
|
||||
for _, e := range m.table[m.hasher.Hash(key)] {
|
||||
if e.key != nil && types.Identical(key, e.key) {
|
||||
return e.value
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set sets the map entry for key to val,
|
||||
// and returns the previous entry, if any.
|
||||
func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
|
||||
if m.table != nil {
|
||||
hash := m.hasher.Hash(key)
|
||||
bucket := m.table[hash]
|
||||
var hole *entry
|
||||
for i, e := range bucket {
|
||||
if e.key == nil {
|
||||
hole = &bucket[i]
|
||||
} else if types.Identical(key, e.key) {
|
||||
prev = e.value
|
||||
bucket[i].value = value
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if hole != nil {
|
||||
*hole = entry{key, value} // overwrite deleted entry
|
||||
} else {
|
||||
m.table[hash] = append(bucket, entry{key, value})
|
||||
}
|
||||
} else {
|
||||
if m.hasher.memo == nil {
|
||||
m.hasher = MakeHasher()
|
||||
}
|
||||
hash := m.hasher.Hash(key)
|
||||
m.table = map[uint32][]entry{hash: {entry{key, value}}}
|
||||
}
|
||||
|
||||
m.length++
|
||||
return
|
||||
}
|
||||
|
||||
// Len returns the number of map entries.
|
||||
func (m *Map) Len() int {
|
||||
if m != nil {
|
||||
return m.length
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Iterate calls function f on each entry in the map in unspecified order.
|
||||
//
|
||||
// If f should mutate the map, Iterate provides the same guarantees as
|
||||
// Go maps: if f deletes a map entry that Iterate has not yet reached,
|
||||
// f will not be invoked for it, but if f inserts a map entry that
|
||||
// Iterate has not yet reached, whether or not f will be invoked for
|
||||
// it is unspecified.
|
||||
//
|
||||
func (m *Map) Iterate(f func(key types.Type, value interface{})) {
|
||||
if m != nil {
|
||||
for _, bucket := range m.table {
|
||||
for _, e := range bucket {
|
||||
if e.key != nil {
|
||||
f(e.key, e.value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Keys returns a new slice containing the set of map keys.
|
||||
// The order is unspecified.
|
||||
func (m *Map) Keys() []types.Type {
|
||||
keys := make([]types.Type, 0, m.Len())
|
||||
m.Iterate(func(key types.Type, _ interface{}) {
|
||||
keys = append(keys, key)
|
||||
})
|
||||
return keys
|
||||
}
|
||||
|
||||
func (m *Map) toString(values bool) string {
|
||||
if m == nil {
|
||||
return "{}"
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprint(&buf, "{")
|
||||
sep := ""
|
||||
m.Iterate(func(key types.Type, value interface{}) {
|
||||
fmt.Fprint(&buf, sep)
|
||||
sep = ", "
|
||||
fmt.Fprint(&buf, key)
|
||||
if values {
|
||||
fmt.Fprintf(&buf, ": %q", value)
|
||||
}
|
||||
})
|
||||
fmt.Fprint(&buf, "}")
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// String returns a string representation of the map's entries.
|
||||
// Values are printed using fmt.Sprintf("%v", v).
|
||||
// Order is unspecified.
|
||||
//
|
||||
func (m *Map) String() string {
|
||||
return m.toString(true)
|
||||
}
|
||||
|
||||
// KeysString returns a string representation of the map's key set.
|
||||
// Order is unspecified.
|
||||
//
|
||||
func (m *Map) KeysString() string {
|
||||
return m.toString(false)
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// Hasher
|
||||
|
||||
// A Hasher maps each type to its hash value.
|
||||
// For efficiency, a hasher uses memoization; thus its memory
|
||||
// footprint grows monotonically over time.
|
||||
// Hashers are not thread-safe.
|
||||
// Hashers have reference semantics.
|
||||
// Call MakeHasher to create a Hasher.
|
||||
type Hasher struct {
|
||||
memo map[types.Type]uint32
|
||||
}
|
||||
|
||||
// MakeHasher returns a new Hasher instance.
|
||||
func MakeHasher() Hasher {
|
||||
return Hasher{make(map[types.Type]uint32)}
|
||||
}
|
||||
|
||||
// Hash computes a hash value for the given type t such that
|
||||
// Identical(t, t') => Hash(t) == Hash(t').
|
||||
func (h Hasher) Hash(t types.Type) uint32 {
|
||||
hash, ok := h.memo[t]
|
||||
if !ok {
|
||||
hash = h.hashFor(t)
|
||||
h.memo[t] = hash
|
||||
}
|
||||
return hash
|
||||
}
|
||||
|
||||
// hashString computes the Fowler–Noll–Vo hash of s.
|
||||
func hashString(s string) uint32 {
|
||||
var h uint32
|
||||
for i := 0; i < len(s); i++ {
|
||||
h ^= uint32(s[i])
|
||||
h *= 16777619
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// hashFor computes the hash of t.
|
||||
func (h Hasher) hashFor(t types.Type) uint32 {
|
||||
// See Identical for rationale.
|
||||
switch t := t.(type) {
|
||||
case *types.Basic:
|
||||
return uint32(t.Kind())
|
||||
|
||||
case *types.Array:
|
||||
return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
|
||||
|
||||
case *types.Slice:
|
||||
return 9049 + 2*h.Hash(t.Elem())
|
||||
|
||||
case *types.Struct:
|
||||
var hash uint32 = 9059
|
||||
for i, n := 0, t.NumFields(); i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if f.Anonymous() {
|
||||
hash += 8861
|
||||
}
|
||||
hash += hashString(t.Tag(i))
|
||||
hash += hashString(f.Name()) // (ignore f.Pkg)
|
||||
hash += h.Hash(f.Type())
|
||||
}
|
||||
return hash
|
||||
|
||||
case *types.Pointer:
|
||||
return 9067 + 2*h.Hash(t.Elem())
|
||||
|
||||
case *types.Signature:
|
||||
var hash uint32 = 9091
|
||||
if t.Variadic() {
|
||||
hash *= 8863
|
||||
}
|
||||
return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
|
||||
|
||||
case *types.Interface:
|
||||
var hash uint32 = 9103
|
||||
for i, n := 0, t.NumMethods(); i < n; i++ {
|
||||
// See go/types.identicalMethods for rationale.
|
||||
// Method order is not significant.
|
||||
// Ignore m.Pkg().
|
||||
m := t.Method(i)
|
||||
hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
|
||||
}
|
||||
return hash
|
||||
|
||||
case *types.Map:
|
||||
return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
|
||||
|
||||
case *types.Chan:
|
||||
return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
|
||||
|
||||
case *types.Named:
|
||||
// Not safe with a copying GC; objects may move.
|
||||
return uint32(reflect.ValueOf(t.Obj()).Pointer())
|
||||
|
||||
case *types.Tuple:
|
||||
return h.hashTuple(t)
|
||||
}
|
||||
panic(t)
|
||||
}
|
||||
|
||||
func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
|
||||
// See go/types.identicalTypes for rationale.
|
||||
n := tuple.Len()
|
||||
var hash uint32 = 9137 + 2*uint32(n)
|
||||
for i := 0; i < n; i++ {
|
||||
hash += 3 * h.Hash(tuple.At(i).Type())
|
||||
}
|
||||
return hash
|
||||
}
|
||||
72
vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
generated
vendored
Normal file
72
vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
generated
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file implements a cache of method sets.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"go/types"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A MethodSetCache records the method set of each type T for which
|
||||
// MethodSet(T) is called so that repeat queries are fast.
|
||||
// The zero value is a ready-to-use cache instance.
|
||||
type MethodSetCache struct {
|
||||
mu sync.Mutex
|
||||
named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
|
||||
others map[types.Type]*types.MethodSet // all other types
|
||||
}
|
||||
|
||||
// MethodSet returns the method set of type T. It is thread-safe.
|
||||
//
|
||||
// If cache is nil, this function is equivalent to types.NewMethodSet(T).
|
||||
// Utility functions can thus expose an optional *MethodSetCache
|
||||
// parameter to clients that care about performance.
|
||||
//
|
||||
func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
|
||||
if cache == nil {
|
||||
return types.NewMethodSet(T)
|
||||
}
|
||||
cache.mu.Lock()
|
||||
defer cache.mu.Unlock()
|
||||
|
||||
switch T := T.(type) {
|
||||
case *types.Named:
|
||||
return cache.lookupNamed(T).value
|
||||
|
||||
case *types.Pointer:
|
||||
if N, ok := T.Elem().(*types.Named); ok {
|
||||
return cache.lookupNamed(N).pointer
|
||||
}
|
||||
}
|
||||
|
||||
// all other types
|
||||
// (The map uses pointer equivalence, not type identity.)
|
||||
mset := cache.others[T]
|
||||
if mset == nil {
|
||||
mset = types.NewMethodSet(T)
|
||||
if cache.others == nil {
|
||||
cache.others = make(map[types.Type]*types.MethodSet)
|
||||
}
|
||||
cache.others[T] = mset
|
||||
}
|
||||
return mset
|
||||
}
|
||||
|
||||
func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
|
||||
if cache.named == nil {
|
||||
cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
|
||||
}
|
||||
// Avoid recomputing mset(*T) for each distinct Pointer
|
||||
// instance whose underlying type is a named type.
|
||||
msets, ok := cache.named[named]
|
||||
if !ok {
|
||||
msets.value = types.NewMethodSet(named)
|
||||
msets.pointer = types.NewMethodSet(types.NewPointer(named))
|
||||
cache.named[named] = msets
|
||||
}
|
||||
return msets
|
||||
}
|
||||
52
vendor/golang.org/x/tools/go/types/typeutil/ui.go
generated
vendored
Normal file
52
vendor/golang.org/x/tools/go/types/typeutil/ui.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package typeutil
|
||||
|
||||
// This file defines utilities for user interfaces that display types.
|
||||
|
||||
import "go/types"
|
||||
|
||||
// IntuitiveMethodSet returns the intuitive method set of a type T,
|
||||
// which is the set of methods you can call on an addressable value of
|
||||
// that type.
|
||||
//
|
||||
// The result always contains MethodSet(T), and is exactly MethodSet(T)
|
||||
// for interface types and for pointer-to-concrete types.
|
||||
// For all other concrete types T, the result additionally
|
||||
// contains each method belonging to *T if there is no identically
|
||||
// named method on T itself.
|
||||
//
|
||||
// This corresponds to user intuition about method sets;
|
||||
// this function is intended only for user interfaces.
|
||||
//
|
||||
// The order of the result is as for types.MethodSet(T).
|
||||
//
|
||||
func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
|
||||
isPointerToConcrete := func(T types.Type) bool {
|
||||
ptr, ok := T.(*types.Pointer)
|
||||
return ok && !types.IsInterface(ptr.Elem())
|
||||
}
|
||||
|
||||
var result []*types.Selection
|
||||
mset := msets.MethodSet(T)
|
||||
if types.IsInterface(T) || isPointerToConcrete(T) {
|
||||
for i, n := 0, mset.Len(); i < n; i++ {
|
||||
result = append(result, mset.At(i))
|
||||
}
|
||||
} else {
|
||||
// T is some other concrete type.
|
||||
// Report methods of T and *T, preferring those of T.
|
||||
pmset := msets.MethodSet(types.NewPointer(T))
|
||||
for i, n := 0, pmset.Len(); i < n; i++ {
|
||||
meth := pmset.At(i)
|
||||
if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
|
||||
meth = m
|
||||
}
|
||||
result = append(result, meth)
|
||||
}
|
||||
|
||||
}
|
||||
return result
|
||||
}
|
||||
5
vendor/golang.org/x/tools/internal/gopathwalk/walk.go
generated
vendored
5
vendor/golang.org/x/tools/internal/gopathwalk/walk.go
generated
vendored
@@ -77,6 +77,7 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root
|
||||
}
|
||||
}
|
||||
|
||||
// walkDir creates a walker and starts fastwalk with this walker.
|
||||
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
|
||||
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
|
||||
if opts.Debug {
|
||||
@@ -114,7 +115,7 @@ type walker struct {
|
||||
ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
|
||||
}
|
||||
|
||||
// init initializes the walker based on its Options.
|
||||
// init initializes the walker based on its Options
|
||||
func (w *walker) init() {
|
||||
var ignoredPaths []string
|
||||
if w.root.Type == RootModuleCache {
|
||||
@@ -167,6 +168,7 @@ func (w *walker) getIgnoredDirs(path string) []string {
|
||||
return ignoredDirs
|
||||
}
|
||||
|
||||
// shouldSkipDir reports whether the file should be skipped or not.
|
||||
func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
|
||||
for _, ignoredDir := range w.ignoredDirs {
|
||||
if os.SameFile(fi, ignoredDir) {
|
||||
@@ -180,6 +182,7 @@ func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// walk walks through the given path.
|
||||
func (w *walker) walk(path string, typ os.FileMode) error {
|
||||
dir := filepath.Dir(path)
|
||||
if typ.IsRegular() {
|
||||
|
||||
100
vendor/golang.org/x/tools/internal/span/parse.go
generated
vendored
100
vendor/golang.org/x/tools/internal/span/parse.go
generated
vendored
@@ -1,100 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package span
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Parse returns the location represented by the input.
|
||||
// All inputs are valid locations, as they can always be a pure filename.
|
||||
// The returned span will be normalized, and thus if printed may produce a
|
||||
// different string.
|
||||
func Parse(input string) Span {
|
||||
// :0:0#0-0:0#0
|
||||
valid := input
|
||||
var hold, offset int
|
||||
hadCol := false
|
||||
suf := rstripSuffix(input)
|
||||
if suf.sep == "#" {
|
||||
offset = suf.num
|
||||
suf = rstripSuffix(suf.remains)
|
||||
}
|
||||
if suf.sep == ":" {
|
||||
valid = suf.remains
|
||||
hold = suf.num
|
||||
hadCol = true
|
||||
suf = rstripSuffix(suf.remains)
|
||||
}
|
||||
switch {
|
||||
case suf.sep == ":":
|
||||
return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), Point{})
|
||||
case suf.sep == "-":
|
||||
// we have a span, fall out of the case to continue
|
||||
default:
|
||||
// separator not valid, rewind to either the : or the start
|
||||
return New(NewURI(valid), NewPoint(hold, 0, offset), Point{})
|
||||
}
|
||||
// only the span form can get here
|
||||
// at this point we still don't know what the numbers we have mean
|
||||
// if have not yet seen a : then we might have either a line or a column depending
|
||||
// on whether start has a column or not
|
||||
// we build an end point and will fix it later if needed
|
||||
end := NewPoint(suf.num, hold, offset)
|
||||
hold, offset = 0, 0
|
||||
suf = rstripSuffix(suf.remains)
|
||||
if suf.sep == "#" {
|
||||
offset = suf.num
|
||||
suf = rstripSuffix(suf.remains)
|
||||
}
|
||||
if suf.sep != ":" {
|
||||
// turns out we don't have a span after all, rewind
|
||||
return New(NewURI(valid), end, Point{})
|
||||
}
|
||||
valid = suf.remains
|
||||
hold = suf.num
|
||||
suf = rstripSuffix(suf.remains)
|
||||
if suf.sep != ":" {
|
||||
// line#offset only
|
||||
return New(NewURI(valid), NewPoint(hold, 0, offset), end)
|
||||
}
|
||||
// we have a column, so if end only had one number, it is also the column
|
||||
if !hadCol {
|
||||
end = NewPoint(suf.num, end.v.Line, end.v.Offset)
|
||||
}
|
||||
return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), end)
|
||||
}
|
||||
|
||||
type suffix struct {
|
||||
remains string
|
||||
sep string
|
||||
num int
|
||||
}
|
||||
|
||||
func rstripSuffix(input string) suffix {
|
||||
if len(input) == 0 {
|
||||
return suffix{"", "", -1}
|
||||
}
|
||||
remains := input
|
||||
num := -1
|
||||
// first see if we have a number at the end
|
||||
last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' })
|
||||
if last >= 0 && last < len(remains)-1 {
|
||||
number, err := strconv.ParseInt(remains[last+1:], 10, 64)
|
||||
if err == nil {
|
||||
num = int(number)
|
||||
remains = remains[:last+1]
|
||||
}
|
||||
}
|
||||
// now see if we have a trailing separator
|
||||
r, w := utf8.DecodeLastRuneInString(remains)
|
||||
if r != ':' && r != '#' && r == '#' {
|
||||
return suffix{input, "", -1}
|
||||
}
|
||||
remains = remains[:len(remains)-w]
|
||||
return suffix{remains, string(r), num}
|
||||
}
|
||||
285
vendor/golang.org/x/tools/internal/span/span.go
generated
vendored
285
vendor/golang.org/x/tools/internal/span/span.go
generated
vendored
@@ -1,285 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package span contains support for representing with positions and ranges in
|
||||
// text files.
|
||||
package span
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
)
|
||||
|
||||
// Span represents a source code range in standardized form.
|
||||
type Span struct {
|
||||
v span
|
||||
}
|
||||
|
||||
// Point represents a single point within a file.
|
||||
// In general this should only be used as part of a Span, as on its own it
|
||||
// does not carry enough information.
|
||||
type Point struct {
|
||||
v point
|
||||
}
|
||||
|
||||
type span struct {
|
||||
URI URI `json:"uri"`
|
||||
Start point `json:"start"`
|
||||
End point `json:"end"`
|
||||
}
|
||||
|
||||
type point struct {
|
||||
Line int `json:"line"`
|
||||
Column int `json:"column"`
|
||||
Offset int `json:"offset"`
|
||||
}
|
||||
|
||||
// Invalid is a span that reports false from IsValid
|
||||
var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}}
|
||||
|
||||
var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}}
|
||||
|
||||
// Converter is the interface to an object that can convert between line:column
|
||||
// and offset forms for a single file.
|
||||
type Converter interface {
|
||||
//ToPosition converts from an offset to a line:column pair.
|
||||
ToPosition(offset int) (int, int, error)
|
||||
//ToOffset converts from a line:column pair to an offset.
|
||||
ToOffset(line, col int) (int, error)
|
||||
}
|
||||
|
||||
func New(uri URI, start Point, end Point) Span {
|
||||
s := Span{v: span{URI: uri, Start: start.v, End: end.v}}
|
||||
s.v.clean()
|
||||
return s
|
||||
}
|
||||
|
||||
func NewPoint(line, col, offset int) Point {
|
||||
p := Point{v: point{Line: line, Column: col, Offset: offset}}
|
||||
p.v.clean()
|
||||
return p
|
||||
}
|
||||
|
||||
func Compare(a, b Span) int {
|
||||
if r := CompareURI(a.URI(), b.URI()); r != 0 {
|
||||
return r
|
||||
}
|
||||
if r := comparePoint(a.v.Start, b.v.Start); r != 0 {
|
||||
return r
|
||||
}
|
||||
return comparePoint(a.v.End, b.v.End)
|
||||
}
|
||||
|
||||
func ComparePoint(a, b Point) int {
|
||||
return comparePoint(a.v, b.v)
|
||||
}
|
||||
|
||||
func comparePoint(a, b point) int {
|
||||
if !a.hasPosition() {
|
||||
if a.Offset < b.Offset {
|
||||
return -1
|
||||
}
|
||||
if a.Offset > b.Offset {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
if a.Line < b.Line {
|
||||
return -1
|
||||
}
|
||||
if a.Line > b.Line {
|
||||
return 1
|
||||
}
|
||||
if a.Column < b.Column {
|
||||
return -1
|
||||
}
|
||||
if a.Column > b.Column {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (s Span) HasPosition() bool { return s.v.Start.hasPosition() }
|
||||
func (s Span) HasOffset() bool { return s.v.Start.hasOffset() }
|
||||
func (s Span) IsValid() bool { return s.v.Start.isValid() }
|
||||
func (s Span) IsPoint() bool { return s.v.Start == s.v.End }
|
||||
func (s Span) URI() URI { return s.v.URI }
|
||||
func (s Span) Start() Point { return Point{s.v.Start} }
|
||||
func (s Span) End() Point { return Point{s.v.End} }
|
||||
func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) }
|
||||
func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) }
|
||||
|
||||
func (p Point) HasPosition() bool { return p.v.hasPosition() }
|
||||
func (p Point) HasOffset() bool { return p.v.hasOffset() }
|
||||
func (p Point) IsValid() bool { return p.v.isValid() }
|
||||
func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) }
|
||||
func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) }
|
||||
func (p Point) Line() int {
|
||||
if !p.v.hasPosition() {
|
||||
panic(fmt.Errorf("position not set in %v", p.v))
|
||||
}
|
||||
return p.v.Line
|
||||
}
|
||||
func (p Point) Column() int {
|
||||
if !p.v.hasPosition() {
|
||||
panic(fmt.Errorf("position not set in %v", p.v))
|
||||
}
|
||||
return p.v.Column
|
||||
}
|
||||
func (p Point) Offset() int {
|
||||
if !p.v.hasOffset() {
|
||||
panic(fmt.Errorf("offset not set in %v", p.v))
|
||||
}
|
||||
return p.v.Offset
|
||||
}
|
||||
|
||||
func (p point) hasPosition() bool { return p.Line > 0 }
|
||||
func (p point) hasOffset() bool { return p.Offset >= 0 }
|
||||
func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() }
|
||||
func (p point) isZero() bool {
|
||||
return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0)
|
||||
}
|
||||
|
||||
func (s *span) clean() {
|
||||
//this presumes the points are already clean
|
||||
if !s.End.isValid() || (s.End == point{}) {
|
||||
s.End = s.Start
|
||||
}
|
||||
}
|
||||
|
||||
func (p *point) clean() {
|
||||
if p.Line < 0 {
|
||||
p.Line = 0
|
||||
}
|
||||
if p.Column <= 0 {
|
||||
if p.Line > 0 {
|
||||
p.Column = 1
|
||||
} else {
|
||||
p.Column = 0
|
||||
}
|
||||
}
|
||||
if p.Offset == 0 && (p.Line > 1 || p.Column > 1) {
|
||||
p.Offset = -1
|
||||
}
|
||||
}
|
||||
|
||||
// Format implements fmt.Formatter to print the Location in a standard form.
|
||||
// The format produced is one that can be read back in using Parse.
|
||||
func (s Span) Format(f fmt.State, c rune) {
|
||||
fullForm := f.Flag('+')
|
||||
preferOffset := f.Flag('#')
|
||||
// we should always have a uri, simplify if it is file format
|
||||
//TODO: make sure the end of the uri is unambiguous
|
||||
uri := string(s.v.URI)
|
||||
if c == 'f' {
|
||||
uri = path.Base(uri)
|
||||
} else if !fullForm {
|
||||
uri = s.v.URI.Filename()
|
||||
}
|
||||
fmt.Fprint(f, uri)
|
||||
if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) {
|
||||
return
|
||||
}
|
||||
// see which bits of start to write
|
||||
printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition())
|
||||
printLine := s.HasPosition() && (fullForm || !printOffset)
|
||||
printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1))
|
||||
fmt.Fprint(f, ":")
|
||||
if printLine {
|
||||
fmt.Fprintf(f, "%d", s.v.Start.Line)
|
||||
}
|
||||
if printColumn {
|
||||
fmt.Fprintf(f, ":%d", s.v.Start.Column)
|
||||
}
|
||||
if printOffset {
|
||||
fmt.Fprintf(f, "#%d", s.v.Start.Offset)
|
||||
}
|
||||
// start is written, do we need end?
|
||||
if s.IsPoint() {
|
||||
return
|
||||
}
|
||||
// we don't print the line if it did not change
|
||||
printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line)
|
||||
fmt.Fprint(f, "-")
|
||||
if printLine {
|
||||
fmt.Fprintf(f, "%d", s.v.End.Line)
|
||||
}
|
||||
if printColumn {
|
||||
if printLine {
|
||||
fmt.Fprint(f, ":")
|
||||
}
|
||||
fmt.Fprintf(f, "%d", s.v.End.Column)
|
||||
}
|
||||
if printOffset {
|
||||
fmt.Fprintf(f, "#%d", s.v.End.Offset)
|
||||
}
|
||||
}
|
||||
|
||||
func (s Span) WithPosition(c Converter) (Span, error) {
|
||||
if err := s.update(c, true, false); err != nil {
|
||||
return Span{}, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s Span) WithOffset(c Converter) (Span, error) {
|
||||
if err := s.update(c, false, true); err != nil {
|
||||
return Span{}, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s Span) WithAll(c Converter) (Span, error) {
|
||||
if err := s.update(c, true, true); err != nil {
|
||||
return Span{}, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *Span) update(c Converter, withPos, withOffset bool) error {
|
||||
if !s.IsValid() {
|
||||
return fmt.Errorf("cannot add information to an invalid span")
|
||||
}
|
||||
if withPos && !s.HasPosition() {
|
||||
if err := s.v.Start.updatePosition(c); err != nil {
|
||||
return err
|
||||
}
|
||||
if s.v.End.Offset == s.v.Start.Offset {
|
||||
s.v.End = s.v.Start
|
||||
} else if err := s.v.End.updatePosition(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) {
|
||||
if err := s.v.Start.updateOffset(c); err != nil {
|
||||
return err
|
||||
}
|
||||
if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column {
|
||||
s.v.End.Offset = s.v.Start.Offset
|
||||
} else if err := s.v.End.updateOffset(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *point) updatePosition(c Converter) error {
|
||||
line, col, err := c.ToPosition(p.Offset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.Line = line
|
||||
p.Column = col
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *point) updateOffset(c Converter) error {
|
||||
offset, err := c.ToOffset(p.Line, p.Column)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.Offset = offset
|
||||
return nil
|
||||
}
|
||||
151
vendor/golang.org/x/tools/internal/span/token.go
generated
vendored
151
vendor/golang.org/x/tools/internal/span/token.go
generated
vendored
@@ -1,151 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package span
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
)
|
||||
|
||||
// Range represents a source code range in token.Pos form.
|
||||
// It also carries the FileSet that produced the positions, so that it is
|
||||
// self contained.
|
||||
type Range struct {
|
||||
FileSet *token.FileSet
|
||||
Start token.Pos
|
||||
End token.Pos
|
||||
}
|
||||
|
||||
// TokenConverter is a Converter backed by a token file set and file.
|
||||
// It uses the file set methods to work out the conversions, which
|
||||
// makes it fast and does not require the file contents.
|
||||
type TokenConverter struct {
|
||||
fset *token.FileSet
|
||||
file *token.File
|
||||
}
|
||||
|
||||
// NewRange creates a new Range from a FileSet and two positions.
|
||||
// To represent a point pass a 0 as the end pos.
|
||||
func NewRange(fset *token.FileSet, start, end token.Pos) Range {
|
||||
return Range{
|
||||
FileSet: fset,
|
||||
Start: start,
|
||||
End: end,
|
||||
}
|
||||
}
|
||||
|
||||
// NewTokenConverter returns an implementation of Converter backed by a
|
||||
// token.File.
|
||||
func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter {
|
||||
return &TokenConverter{fset: fset, file: f}
|
||||
}
|
||||
|
||||
// NewContentConverter returns an implementation of Converter for the
|
||||
// given file content.
|
||||
func NewContentConverter(filename string, content []byte) *TokenConverter {
|
||||
fset := token.NewFileSet()
|
||||
f := fset.AddFile(filename, -1, len(content))
|
||||
f.SetLinesForContent(content)
|
||||
return &TokenConverter{fset: fset, file: f}
|
||||
}
|
||||
|
||||
// IsPoint returns true if the range represents a single point.
|
||||
func (r Range) IsPoint() bool {
|
||||
return r.Start == r.End
|
||||
}
|
||||
|
||||
// Span converts a Range to a Span that represents the Range.
|
||||
// It will fill in all the members of the Span, calculating the line and column
|
||||
// information.
|
||||
func (r Range) Span() (Span, error) {
|
||||
f := r.FileSet.File(r.Start)
|
||||
if f == nil {
|
||||
return Span{}, fmt.Errorf("file not found in FileSet")
|
||||
}
|
||||
s := Span{v: span{URI: FileURI(f.Name())}}
|
||||
var err error
|
||||
s.v.Start.Offset, err = offset(f, r.Start)
|
||||
if err != nil {
|
||||
return Span{}, err
|
||||
}
|
||||
if r.End.IsValid() {
|
||||
s.v.End.Offset, err = offset(f, r.End)
|
||||
if err != nil {
|
||||
return Span{}, err
|
||||
}
|
||||
}
|
||||
s.v.Start.clean()
|
||||
s.v.End.clean()
|
||||
s.v.clean()
|
||||
converter := NewTokenConverter(r.FileSet, f)
|
||||
return s.WithPosition(converter)
|
||||
}
|
||||
|
||||
// offset is a copy of the Offset function in go/token, but with the adjustment
|
||||
// that it does not panic on invalid positions.
|
||||
func offset(f *token.File, pos token.Pos) (int, error) {
|
||||
if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() {
|
||||
return 0, fmt.Errorf("invalid pos")
|
||||
}
|
||||
return int(pos) - f.Base(), nil
|
||||
}
|
||||
|
||||
// Range converts a Span to a Range that represents the Span for the supplied
|
||||
// File.
|
||||
func (s Span) Range(converter *TokenConverter) (Range, error) {
|
||||
s, err := s.WithOffset(converter)
|
||||
if err != nil {
|
||||
return Range{}, err
|
||||
}
|
||||
// go/token will panic if the offset is larger than the file's size,
|
||||
// so check here to avoid panicking.
|
||||
if s.Start().Offset() > converter.file.Size() {
|
||||
return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size())
|
||||
}
|
||||
if s.End().Offset() > converter.file.Size() {
|
||||
return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size())
|
||||
}
|
||||
return Range{
|
||||
FileSet: converter.fset,
|
||||
Start: converter.file.Pos(s.Start().Offset()),
|
||||
End: converter.file.Pos(s.End().Offset()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (l *TokenConverter) ToPosition(offset int) (int, int, error) {
|
||||
if offset > l.file.Size() {
|
||||
return 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, l.file.Size())
|
||||
}
|
||||
pos := l.file.Pos(offset)
|
||||
p := l.fset.Position(pos)
|
||||
if offset == l.file.Size() {
|
||||
return p.Line + 1, 1, nil
|
||||
}
|
||||
return p.Line, p.Column, nil
|
||||
}
|
||||
|
||||
func (l *TokenConverter) ToOffset(line, col int) (int, error) {
|
||||
if line < 0 {
|
||||
return -1, fmt.Errorf("line is not valid")
|
||||
}
|
||||
lineMax := l.file.LineCount() + 1
|
||||
if line > lineMax {
|
||||
return -1, fmt.Errorf("line is beyond end of file %v", lineMax)
|
||||
} else if line == lineMax {
|
||||
if col > 1 {
|
||||
return -1, fmt.Errorf("column is beyond end of file")
|
||||
}
|
||||
// at the end of the file, allowing for a trailing eol
|
||||
return l.file.Size(), nil
|
||||
}
|
||||
pos := lineStart(l.file, line)
|
||||
if !pos.IsValid() {
|
||||
return -1, fmt.Errorf("line is not in file")
|
||||
}
|
||||
// we assume that column is in bytes here, and that the first byte of a
|
||||
// line is at column 1
|
||||
pos += token.Pos(col - 1)
|
||||
return offset(l.file, pos)
|
||||
}
|
||||
39
vendor/golang.org/x/tools/internal/span/token111.go
generated
vendored
39
vendor/golang.org/x/tools/internal/span/token111.go
generated
vendored
@@ -1,39 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.12
|
||||
|
||||
package span
|
||||
|
||||
import (
|
||||
"go/token"
|
||||
)
|
||||
|
||||
// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go
|
||||
// versions <= 1.11, we borrow logic from the analysisutil package.
|
||||
// TODO(rstambler): Delete this file when we no longer support Go 1.11.
|
||||
func lineStart(f *token.File, line int) token.Pos {
|
||||
// Use binary search to find the start offset of this line.
|
||||
|
||||
min := 0 // inclusive
|
||||
max := f.Size() // exclusive
|
||||
for {
|
||||
offset := (min + max) / 2
|
||||
pos := f.Pos(offset)
|
||||
posn := f.Position(pos)
|
||||
if posn.Line == line {
|
||||
return pos - (token.Pos(posn.Column) - 1)
|
||||
}
|
||||
|
||||
if min+1 >= max {
|
||||
return token.NoPos
|
||||
}
|
||||
|
||||
if posn.Line < line {
|
||||
min = offset
|
||||
} else {
|
||||
max = offset
|
||||
}
|
||||
}
|
||||
}
|
||||
16
vendor/golang.org/x/tools/internal/span/token112.go
generated
vendored
16
vendor/golang.org/x/tools/internal/span/token112.go
generated
vendored
@@ -1,16 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.12
|
||||
|
||||
package span
|
||||
|
||||
import (
|
||||
"go/token"
|
||||
)
|
||||
|
||||
// TODO(rstambler): Delete this file when we no longer support Go 1.11.
|
||||
func lineStart(f *token.File, line int) token.Pos {
|
||||
return f.LineStart(line)
|
||||
}
|
||||
152
vendor/golang.org/x/tools/internal/span/uri.go
generated
vendored
152
vendor/golang.org/x/tools/internal/span/uri.go
generated
vendored
@@ -1,152 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package span
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
const fileScheme = "file"
|
||||
|
||||
// URI represents the full URI for a file.
|
||||
type URI string
|
||||
|
||||
// Filename returns the file path for the given URI.
|
||||
// It is an error to call this on a URI that is not a valid filename.
|
||||
func (uri URI) Filename() string {
|
||||
filename, err := filename(uri)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return filepath.FromSlash(filename)
|
||||
}
|
||||
|
||||
func filename(uri URI) (string, error) {
|
||||
if uri == "" {
|
||||
return "", nil
|
||||
}
|
||||
u, err := url.ParseRequestURI(string(uri))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if u.Scheme != fileScheme {
|
||||
return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri)
|
||||
}
|
||||
if isWindowsDriveURI(u.Path) {
|
||||
u.Path = u.Path[1:]
|
||||
}
|
||||
return u.Path, nil
|
||||
}
|
||||
|
||||
// NewURI returns a span URI for the string.
|
||||
// It will attempt to detect if the string is a file path or uri.
|
||||
func NewURI(s string) URI {
|
||||
if u, err := url.PathUnescape(s); err == nil {
|
||||
s = u
|
||||
}
|
||||
if strings.HasPrefix(s, fileScheme+"://") {
|
||||
return URI(s)
|
||||
}
|
||||
return FileURI(s)
|
||||
}
|
||||
|
||||
func CompareURI(a, b URI) int {
|
||||
if equalURI(a, b) {
|
||||
return 0
|
||||
}
|
||||
if a < b {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func equalURI(a, b URI) bool {
|
||||
if a == b {
|
||||
return true
|
||||
}
|
||||
// If we have the same URI basename, we may still have the same file URIs.
|
||||
if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) {
|
||||
return false
|
||||
}
|
||||
fa, err := filename(a)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
fb, err := filename(b)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// Stat the files to check if they are equal.
|
||||
infoa, err := os.Stat(filepath.FromSlash(fa))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
infob, err := os.Stat(filepath.FromSlash(fb))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return os.SameFile(infoa, infob)
|
||||
}
|
||||
|
||||
// FileURI returns a span URI for the supplied file path.
|
||||
// It will always have the file scheme.
|
||||
func FileURI(path string) URI {
|
||||
if path == "" {
|
||||
return ""
|
||||
}
|
||||
// Handle standard library paths that contain the literal "$GOROOT".
|
||||
// TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT.
|
||||
const prefix = "$GOROOT"
|
||||
if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) {
|
||||
suffix := path[len(prefix):]
|
||||
path = runtime.GOROOT() + suffix
|
||||
}
|
||||
if !isWindowsDrivePath(path) {
|
||||
if abs, err := filepath.Abs(path); err == nil {
|
||||
path = abs
|
||||
}
|
||||
}
|
||||
// Check the file path again, in case it became absolute.
|
||||
if isWindowsDrivePath(path) {
|
||||
path = "/" + path
|
||||
}
|
||||
path = filepath.ToSlash(path)
|
||||
u := url.URL{
|
||||
Scheme: fileScheme,
|
||||
Path: path,
|
||||
}
|
||||
uri := u.String()
|
||||
if unescaped, err := url.PathUnescape(uri); err == nil {
|
||||
uri = unescaped
|
||||
}
|
||||
return URI(uri)
|
||||
}
|
||||
|
||||
// isWindowsDrivePath returns true if the file path is of the form used by
|
||||
// Windows. We check if the path begins with a drive letter, followed by a ":".
|
||||
func isWindowsDrivePath(path string) bool {
|
||||
if len(path) < 4 {
|
||||
return false
|
||||
}
|
||||
return unicode.IsLetter(rune(path[0])) && path[1] == ':'
|
||||
}
|
||||
|
||||
// isWindowsDriveURI returns true if the file URI is of the format used by
|
||||
// Windows URIs. The url.Parse package does not specially handle Windows paths
|
||||
// (see https://golang.org/issue/6027). We check if the URI path has
|
||||
// a drive prefix (e.g. "/C:"). If so, we trim the leading "/".
|
||||
func isWindowsDriveURI(uri string) bool {
|
||||
if len(uri) < 4 {
|
||||
return false
|
||||
}
|
||||
return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':'
|
||||
}
|
||||
94
vendor/golang.org/x/tools/internal/span/utf16.go
generated
vendored
94
vendor/golang.org/x/tools/internal/span/utf16.go
generated
vendored
@@ -1,94 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package span
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unicode/utf16"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// ToUTF16Column calculates the utf16 column expressed by the point given the
|
||||
// supplied file contents.
|
||||
// This is used to convert from the native (always in bytes) column
|
||||
// representation and the utf16 counts used by some editors.
|
||||
func ToUTF16Column(p Point, content []byte) (int, error) {
|
||||
if content == nil {
|
||||
return -1, fmt.Errorf("ToUTF16Column: missing content")
|
||||
}
|
||||
if !p.HasPosition() {
|
||||
return -1, fmt.Errorf("ToUTF16Column: point is missing position")
|
||||
}
|
||||
if !p.HasOffset() {
|
||||
return -1, fmt.Errorf("ToUTF16Column: point is missing offset")
|
||||
}
|
||||
offset := p.Offset() // 0-based
|
||||
colZero := p.Column() - 1 // 0-based
|
||||
if colZero == 0 {
|
||||
// 0-based column 0, so it must be chr 1
|
||||
return 1, nil
|
||||
} else if colZero < 0 {
|
||||
return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero)
|
||||
}
|
||||
// work out the offset at the start of the line using the column
|
||||
lineOffset := offset - colZero
|
||||
if lineOffset < 0 || offset > len(content) {
|
||||
return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content))
|
||||
}
|
||||
// Use the offset to pick out the line start.
|
||||
// This cannot panic: offset > len(content) and lineOffset < offset.
|
||||
start := content[lineOffset:]
|
||||
|
||||
// Now, truncate down to the supplied column.
|
||||
start = start[:colZero]
|
||||
|
||||
// and count the number of utf16 characters
|
||||
// in theory we could do this by hand more efficiently...
|
||||
return len(utf16.Encode([]rune(string(start)))) + 1, nil
|
||||
}
|
||||
|
||||
// FromUTF16Column advances the point by the utf16 character offset given the
|
||||
// supplied line contents.
|
||||
// This is used to convert from the utf16 counts used by some editors to the
|
||||
// native (always in bytes) column representation.
|
||||
func FromUTF16Column(p Point, chr int, content []byte) (Point, error) {
|
||||
if !p.HasOffset() {
|
||||
return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset")
|
||||
}
|
||||
// if chr is 1 then no adjustment needed
|
||||
if chr <= 1 {
|
||||
return p, nil
|
||||
}
|
||||
if p.Offset() >= len(content) {
|
||||
return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content))
|
||||
}
|
||||
remains := content[p.Offset():]
|
||||
// scan forward the specified number of characters
|
||||
for count := 1; count < chr; count++ {
|
||||
if len(remains) <= 0 {
|
||||
return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content")
|
||||
}
|
||||
r, w := utf8.DecodeRune(remains)
|
||||
if r == '\n' {
|
||||
// Per the LSP spec:
|
||||
//
|
||||
// > If the character value is greater than the line length it
|
||||
// > defaults back to the line length.
|
||||
break
|
||||
}
|
||||
remains = remains[w:]
|
||||
if r >= 0x10000 {
|
||||
// a two point rune
|
||||
count++
|
||||
// if we finished in a two point rune, do not advance past the first
|
||||
if count >= chr {
|
||||
break
|
||||
}
|
||||
}
|
||||
p.v.Column += w
|
||||
p.v.Offset += w
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
228
vendor/golang.org/x/tools/present/args.go
generated
vendored
Normal file
228
vendor/golang.org/x/tools/present/args.go
generated
vendored
Normal file
@@ -0,0 +1,228 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package present
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// This file is stolen from go/src/cmd/godoc/codewalk.go.
|
||||
// It's an evaluator for the file address syntax implemented by acme and sam,
|
||||
// but using Go-native regular expressions.
|
||||
// To keep things reasonably close, this version uses (?m:re) for all user-provided
|
||||
// regular expressions. That is the only change to the code from codewalk.go.
|
||||
// See http://9p.io/sys/doc/sam/sam.html Table II for details on the syntax.
|
||||
|
||||
// addrToByte evaluates the given address starting at offset start in data.
|
||||
// It returns the lo and hi byte offset of the matched region within data.
|
||||
func addrToByteRange(addr string, start int, data []byte) (lo, hi int, err error) {
|
||||
if addr == "" {
|
||||
lo, hi = start, len(data)
|
||||
return
|
||||
}
|
||||
var (
|
||||
dir byte
|
||||
prevc byte
|
||||
charOffset bool
|
||||
)
|
||||
lo = start
|
||||
hi = start
|
||||
for addr != "" && err == nil {
|
||||
c := addr[0]
|
||||
switch c {
|
||||
default:
|
||||
err = errors.New("invalid address syntax near " + string(c))
|
||||
case ',':
|
||||
if len(addr) == 1 {
|
||||
hi = len(data)
|
||||
} else {
|
||||
_, hi, err = addrToByteRange(addr[1:], hi, data)
|
||||
}
|
||||
return
|
||||
|
||||
case '+', '-':
|
||||
if prevc == '+' || prevc == '-' {
|
||||
lo, hi, err = addrNumber(data, lo, hi, prevc, 1, charOffset)
|
||||
}
|
||||
dir = c
|
||||
|
||||
case '$':
|
||||
lo = len(data)
|
||||
hi = len(data)
|
||||
if len(addr) > 1 {
|
||||
dir = '+'
|
||||
}
|
||||
|
||||
case '#':
|
||||
charOffset = true
|
||||
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
|
||||
var i int
|
||||
for i = 1; i < len(addr); i++ {
|
||||
if addr[i] < '0' || addr[i] > '9' {
|
||||
break
|
||||
}
|
||||
}
|
||||
var n int
|
||||
n, err = strconv.Atoi(addr[0:i])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
lo, hi, err = addrNumber(data, lo, hi, dir, n, charOffset)
|
||||
dir = 0
|
||||
charOffset = false
|
||||
prevc = c
|
||||
addr = addr[i:]
|
||||
continue
|
||||
|
||||
case '/':
|
||||
var i, j int
|
||||
Regexp:
|
||||
for i = 1; i < len(addr); i++ {
|
||||
switch addr[i] {
|
||||
case '\\':
|
||||
i++
|
||||
case '/':
|
||||
j = i + 1
|
||||
break Regexp
|
||||
}
|
||||
}
|
||||
if j == 0 {
|
||||
j = i
|
||||
}
|
||||
pattern := addr[1:i]
|
||||
lo, hi, err = addrRegexp(data, lo, hi, dir, pattern)
|
||||
prevc = c
|
||||
addr = addr[j:]
|
||||
continue
|
||||
}
|
||||
prevc = c
|
||||
addr = addr[1:]
|
||||
}
|
||||
|
||||
if err == nil && dir != 0 {
|
||||
lo, hi, err = addrNumber(data, lo, hi, dir, 1, charOffset)
|
||||
}
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
return lo, hi, nil
|
||||
}
|
||||
|
||||
// addrNumber applies the given dir, n, and charOffset to the address lo, hi.
|
||||
// dir is '+' or '-', n is the count, and charOffset is true if the syntax
|
||||
// used was #n. Applying +n (or +#n) means to advance n lines
|
||||
// (or characters) after hi. Applying -n (or -#n) means to back up n lines
|
||||
// (or characters) before lo.
|
||||
// The return value is the new lo, hi.
|
||||
func addrNumber(data []byte, lo, hi int, dir byte, n int, charOffset bool) (int, int, error) {
|
||||
switch dir {
|
||||
case 0:
|
||||
lo = 0
|
||||
hi = 0
|
||||
fallthrough
|
||||
|
||||
case '+':
|
||||
if charOffset {
|
||||
pos := hi
|
||||
for ; n > 0 && pos < len(data); n-- {
|
||||
_, size := utf8.DecodeRune(data[pos:])
|
||||
pos += size
|
||||
}
|
||||
if n == 0 {
|
||||
return pos, pos, nil
|
||||
}
|
||||
break
|
||||
}
|
||||
// find next beginning of line
|
||||
if hi > 0 {
|
||||
for hi < len(data) && data[hi-1] != '\n' {
|
||||
hi++
|
||||
}
|
||||
}
|
||||
lo = hi
|
||||
if n == 0 {
|
||||
return lo, hi, nil
|
||||
}
|
||||
for ; hi < len(data); hi++ {
|
||||
if data[hi] != '\n' {
|
||||
continue
|
||||
}
|
||||
switch n--; n {
|
||||
case 1:
|
||||
lo = hi + 1
|
||||
case 0:
|
||||
return lo, hi + 1, nil
|
||||
}
|
||||
}
|
||||
|
||||
case '-':
|
||||
if charOffset {
|
||||
// Scan backward for bytes that are not UTF-8 continuation bytes.
|
||||
pos := lo
|
||||
for ; pos > 0 && n > 0; pos-- {
|
||||
if data[pos]&0xc0 != 0x80 {
|
||||
n--
|
||||
}
|
||||
}
|
||||
if n == 0 {
|
||||
return pos, pos, nil
|
||||
}
|
||||
break
|
||||
}
|
||||
// find earlier beginning of line
|
||||
for lo > 0 && data[lo-1] != '\n' {
|
||||
lo--
|
||||
}
|
||||
hi = lo
|
||||
if n == 0 {
|
||||
return lo, hi, nil
|
||||
}
|
||||
for ; lo >= 0; lo-- {
|
||||
if lo > 0 && data[lo-1] != '\n' {
|
||||
continue
|
||||
}
|
||||
switch n--; n {
|
||||
case 1:
|
||||
hi = lo
|
||||
case 0:
|
||||
return lo, hi, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0, 0, errors.New("address out of range")
|
||||
}
|
||||
|
||||
// addrRegexp searches for pattern in the given direction starting at lo, hi.
|
||||
// The direction dir is '+' (search forward from hi) or '-' (search backward from lo).
|
||||
// Backward searches are unimplemented.
|
||||
func addrRegexp(data []byte, lo, hi int, dir byte, pattern string) (int, int, error) {
|
||||
// We want ^ and $ to work as in sam/acme, so use ?m.
|
||||
re, err := regexp.Compile("(?m:" + pattern + ")")
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
if dir == '-' {
|
||||
// Could implement reverse search using binary search
|
||||
// through file, but that seems like overkill.
|
||||
return 0, 0, errors.New("reverse search not implemented")
|
||||
}
|
||||
m := re.FindIndex(data[hi:])
|
||||
if len(m) > 0 {
|
||||
m[0] += hi
|
||||
m[1] += hi
|
||||
} else if hi > 0 {
|
||||
// No match. Wrap to beginning of data.
|
||||
m = re.FindIndex(data)
|
||||
}
|
||||
if len(m) == 0 {
|
||||
return 0, 0, errors.New("no match for " + pattern)
|
||||
}
|
||||
return m[0], m[1], nil
|
||||
}
|
||||
22
vendor/golang.org/x/tools/present/caption.go
generated
vendored
Normal file
22
vendor/golang.org/x/tools/present/caption.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package present
|
||||
|
||||
import "strings"
|
||||
|
||||
func init() {
|
||||
Register("caption", parseCaption)
|
||||
}
|
||||
|
||||
type Caption struct {
|
||||
Text string
|
||||
}
|
||||
|
||||
func (c Caption) TemplateName() string { return "caption" }
|
||||
|
||||
func parseCaption(_ *Context, _ string, _ int, text string) (Elem, error) {
|
||||
text = strings.TrimSpace(strings.TrimPrefix(text, ".caption"))
|
||||
return Caption{text}, nil
|
||||
}
|
||||
267
vendor/golang.org/x/tools/present/code.go
generated
vendored
Normal file
267
vendor/golang.org/x/tools/present/code.go
generated
vendored
Normal file
@@ -0,0 +1,267 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package present
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// PlayEnabled specifies whether runnable playground snippets should be
|
||||
// displayed in the present user interface.
|
||||
var PlayEnabled = false
|
||||
|
||||
// TODO(adg): replace the PlayEnabled flag with something less spaghetti-like.
|
||||
// Instead this will probably be determined by a template execution Context
|
||||
// value that contains various global metadata required when rendering
|
||||
// templates.
|
||||
|
||||
// NotesEnabled specifies whether presenter notes should be displayed in the
|
||||
// present user interface.
|
||||
var NotesEnabled = false
|
||||
|
||||
func init() {
|
||||
Register("code", parseCode)
|
||||
Register("play", parseCode)
|
||||
}
|
||||
|
||||
type Code struct {
|
||||
Text template.HTML
|
||||
Play bool // runnable code
|
||||
Edit bool // editable code
|
||||
FileName string // file name
|
||||
Ext string // file extension
|
||||
Raw []byte // content of the file
|
||||
}
|
||||
|
||||
func (c Code) TemplateName() string { return "code" }
|
||||
|
||||
// The input line is a .code or .play entry with a file name and an optional HLfoo marker on the end.
|
||||
// Anything between the file and HL (if any) is an address expression, which we treat as a string here.
|
||||
// We pick off the HL first, for easy parsing.
|
||||
var (
|
||||
highlightRE = regexp.MustCompile(`\s+HL([a-zA-Z0-9_]+)?$`)
|
||||
hlCommentRE = regexp.MustCompile(`(.+) // HL(.*)$`)
|
||||
codeRE = regexp.MustCompile(`\.(code|play)\s+((?:(?:-edit|-numbers)\s+)*)([^\s]+)(?:\s+(.*))?$`)
|
||||
)
|
||||
|
||||
// parseCode parses a code present directive. Its syntax:
|
||||
// .code [-numbers] [-edit] <filename> [address] [highlight]
|
||||
// The directive may also be ".play" if the snippet is executable.
|
||||
func parseCode(ctx *Context, sourceFile string, sourceLine int, cmd string) (Elem, error) {
|
||||
cmd = strings.TrimSpace(cmd)
|
||||
|
||||
// Pull off the HL, if any, from the end of the input line.
|
||||
highlight := ""
|
||||
if hl := highlightRE.FindStringSubmatchIndex(cmd); len(hl) == 4 {
|
||||
if hl[2] < 0 || hl[3] < 0 {
|
||||
return nil, fmt.Errorf("%s:%d invalid highlight syntax", sourceFile, sourceLine)
|
||||
}
|
||||
highlight = cmd[hl[2]:hl[3]]
|
||||
cmd = cmd[:hl[2]-2]
|
||||
}
|
||||
|
||||
// Parse the remaining command line.
|
||||
// Arguments:
|
||||
// args[0]: whole match
|
||||
// args[1]: .code/.play
|
||||
// args[2]: flags ("-edit -numbers")
|
||||
// args[3]: file name
|
||||
// args[4]: optional address
|
||||
args := codeRE.FindStringSubmatch(cmd)
|
||||
if len(args) != 5 {
|
||||
return nil, fmt.Errorf("%s:%d: syntax error for .code/.play invocation", sourceFile, sourceLine)
|
||||
}
|
||||
command, flags, file, addr := args[1], args[2], args[3], strings.TrimSpace(args[4])
|
||||
play := command == "play" && PlayEnabled
|
||||
|
||||
// Read in code file and (optionally) match address.
|
||||
filename := filepath.Join(filepath.Dir(sourceFile), file)
|
||||
textBytes, err := ctx.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s:%d: %v", sourceFile, sourceLine, err)
|
||||
}
|
||||
lo, hi, err := addrToByteRange(addr, 0, textBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s:%d: %v", sourceFile, sourceLine, err)
|
||||
}
|
||||
if lo > hi {
|
||||
// The search in addrToByteRange can wrap around so we might
|
||||
// end up with the range ending before its starting point
|
||||
hi, lo = lo, hi
|
||||
}
|
||||
|
||||
// Acme pattern matches can stop mid-line,
|
||||
// so run to end of line in both directions if not at line start/end.
|
||||
for lo > 0 && textBytes[lo-1] != '\n' {
|
||||
lo--
|
||||
}
|
||||
if hi > 0 {
|
||||
for hi < len(textBytes) && textBytes[hi-1] != '\n' {
|
||||
hi++
|
||||
}
|
||||
}
|
||||
|
||||
lines := codeLines(textBytes, lo, hi)
|
||||
|
||||
data := &codeTemplateData{
|
||||
Lines: formatLines(lines, highlight),
|
||||
Edit: strings.Contains(flags, "-edit"),
|
||||
Numbers: strings.Contains(flags, "-numbers"),
|
||||
}
|
||||
|
||||
// Include before and after in a hidden span for playground code.
|
||||
if play {
|
||||
data.Prefix = textBytes[:lo]
|
||||
data.Suffix = textBytes[hi:]
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := codeTemplate.Execute(&buf, data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return Code{
|
||||
Text: template.HTML(buf.String()),
|
||||
Play: play,
|
||||
Edit: data.Edit,
|
||||
FileName: filepath.Base(filename),
|
||||
Ext: filepath.Ext(filename),
|
||||
Raw: rawCode(lines),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// formatLines returns a new slice of codeLine with the given lines
|
||||
// replacing tabs with spaces and adding highlighting where needed.
|
||||
func formatLines(lines []codeLine, highlight string) []codeLine {
|
||||
formatted := make([]codeLine, len(lines))
|
||||
for i, line := range lines {
|
||||
// Replace tabs with spaces, which work better in HTML.
|
||||
line.L = strings.Replace(line.L, "\t", " ", -1)
|
||||
|
||||
// Highlight lines that end with "// HL[highlight]"
|
||||
// and strip the magic comment.
|
||||
if m := hlCommentRE.FindStringSubmatch(line.L); m != nil {
|
||||
line.L = m[1]
|
||||
line.HL = m[2] == highlight
|
||||
}
|
||||
|
||||
formatted[i] = line
|
||||
}
|
||||
return formatted
|
||||
}
|
||||
|
||||
// rawCode returns the code represented by the given codeLines without any kind
|
||||
// of formatting.
|
||||
func rawCode(lines []codeLine) []byte {
|
||||
b := new(bytes.Buffer)
|
||||
for _, line := range lines {
|
||||
b.WriteString(line.L)
|
||||
b.WriteByte('\n')
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
type codeTemplateData struct {
|
||||
Lines []codeLine
|
||||
Prefix, Suffix []byte
|
||||
Edit, Numbers bool
|
||||
}
|
||||
|
||||
var leadingSpaceRE = regexp.MustCompile(`^[ \t]*`)
|
||||
|
||||
var codeTemplate = template.Must(template.New("code").Funcs(template.FuncMap{
|
||||
"trimSpace": strings.TrimSpace,
|
||||
"leadingSpace": leadingSpaceRE.FindString,
|
||||
}).Parse(codeTemplateHTML))
|
||||
|
||||
const codeTemplateHTML = `
|
||||
{{with .Prefix}}<pre style="display: none"><span>{{printf "%s" .}}</span></pre>{{end}}
|
||||
|
||||
<pre{{if .Edit}} contenteditable="true" spellcheck="false"{{end}}{{if .Numbers}} class="numbers"{{end}}>{{/*
|
||||
*/}}{{range .Lines}}<span num="{{.N}}">{{/*
|
||||
*/}}{{if .HL}}{{leadingSpace .L}}<b>{{trimSpace .L}}</b>{{/*
|
||||
*/}}{{else}}{{.L}}{{end}}{{/*
|
||||
*/}}</span>
|
||||
{{end}}</pre>
|
||||
|
||||
{{with .Suffix}}<pre style="display: none"><span>{{printf "%s" .}}</span></pre>{{end}}
|
||||
`
|
||||
|
||||
// codeLine represents a line of code extracted from a source file.
|
||||
type codeLine struct {
|
||||
L string // The line of code.
|
||||
N int // The line number from the source file.
|
||||
HL bool // Whether the line should be highlighted.
|
||||
}
|
||||
|
||||
// codeLines takes a source file and returns the lines that
|
||||
// span the byte range specified by start and end.
|
||||
// It discards lines that end in "OMIT".
|
||||
func codeLines(src []byte, start, end int) (lines []codeLine) {
|
||||
startLine := 1
|
||||
for i, b := range src {
|
||||
if i == start {
|
||||
break
|
||||
}
|
||||
if b == '\n' {
|
||||
startLine++
|
||||
}
|
||||
}
|
||||
s := bufio.NewScanner(bytes.NewReader(src[start:end]))
|
||||
for n := startLine; s.Scan(); n++ {
|
||||
l := s.Text()
|
||||
if strings.HasSuffix(l, "OMIT") {
|
||||
continue
|
||||
}
|
||||
lines = append(lines, codeLine{L: l, N: n})
|
||||
}
|
||||
// Trim leading and trailing blank lines.
|
||||
for len(lines) > 0 && len(lines[0].L) == 0 {
|
||||
lines = lines[1:]
|
||||
}
|
||||
for len(lines) > 0 && len(lines[len(lines)-1].L) == 0 {
|
||||
lines = lines[:len(lines)-1]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func parseArgs(name string, line int, args []string) (res []interface{}, err error) {
|
||||
res = make([]interface{}, len(args))
|
||||
for i, v := range args {
|
||||
if len(v) == 0 {
|
||||
return nil, fmt.Errorf("%s:%d bad code argument %q", name, line, v)
|
||||
}
|
||||
switch v[0] {
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
|
||||
n, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s:%d bad code argument %q", name, line, v)
|
||||
}
|
||||
res[i] = n
|
||||
case '/':
|
||||
if len(v) < 2 || v[len(v)-1] != '/' {
|
||||
return nil, fmt.Errorf("%s:%d bad code argument %q", name, line, v)
|
||||
}
|
||||
res[i] = v
|
||||
case '$':
|
||||
res[i] = "$"
|
||||
case '_':
|
||||
if len(v) == 1 {
|
||||
// Do nothing; "_" indicates an intentionally empty parameter.
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
return nil, fmt.Errorf("%s:%d bad code argument %q", name, line, v)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
261
vendor/golang.org/x/tools/present/doc.go
generated
vendored
Normal file
261
vendor/golang.org/x/tools/present/doc.go
generated
vendored
Normal file
@@ -0,0 +1,261 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
The present file format
|
||||
|
||||
Present files have the following format. The first non-blank non-comment
|
||||
line is the title, so the header looks like
|
||||
|
||||
Title of document
|
||||
Subtitle of document
|
||||
15:04 2 Jan 2006
|
||||
Tags: foo, bar, baz
|
||||
<blank line>
|
||||
Author Name
|
||||
Job title, Company
|
||||
joe@example.com
|
||||
http://url/
|
||||
@twitter_name
|
||||
|
||||
The subtitle, date, and tags lines are optional.
|
||||
|
||||
The date line may be written without a time:
|
||||
2 Jan 2006
|
||||
In this case, the time will be interpreted as 10am UTC on that date.
|
||||
|
||||
The tags line is a comma-separated list of tags that may be used to categorize
|
||||
the document.
|
||||
|
||||
The author section may contain a mixture of text, twitter names, and links.
|
||||
For slide presentations, only the plain text lines will be displayed on the
|
||||
first slide.
|
||||
|
||||
Multiple presenters may be specified, separated by a blank line.
|
||||
|
||||
After that come slides/sections, each after a blank line:
|
||||
|
||||
* Title of slide or section (must have asterisk)
|
||||
|
||||
Some Text
|
||||
|
||||
** Subsection
|
||||
|
||||
- bullets
|
||||
- more bullets
|
||||
- a bullet with
|
||||
|
||||
*** Sub-subsection
|
||||
|
||||
Some More text
|
||||
|
||||
Preformatted text
|
||||
is indented (however you like)
|
||||
|
||||
Further Text, including invocations like:
|
||||
|
||||
.code x.go /^func main/,/^}/
|
||||
.play y.go
|
||||
.image image.jpg
|
||||
.background image.jpg
|
||||
.iframe http://foo
|
||||
.link http://foo label
|
||||
.html file.html
|
||||
.caption _Gopher_ by [[https://www.instagram.com/reneefrench/][Renée French]]
|
||||
|
||||
Again, more text
|
||||
|
||||
Blank lines are OK (not mandatory) after the title and after the
|
||||
text. Text, bullets, and .code etc. are all optional; title is
|
||||
not.
|
||||
|
||||
Lines starting with # in column 1 are commentary.
|
||||
|
||||
Fonts:
|
||||
|
||||
Within the input for plain text or lists, text bracketed by font
|
||||
markers will be presented in italic, bold, or program font.
|
||||
Marker characters are _ (italic), * (bold) and ` (program font).
|
||||
An opening marker must be preceded by a space or punctuation
|
||||
character or else be at start of a line; similarly, a closing
|
||||
marker must be followed by a space or punctuation character or
|
||||
else be at the end of a line. Unmatched markers appear as plain text.
|
||||
There must be no spaces between markers. Within marked text,
|
||||
a single marker character becomes a space and a doubled single
|
||||
marker quotes the marker character.
|
||||
|
||||
_italic_
|
||||
*bold*
|
||||
`program`
|
||||
Markup—_especially_italic_text_—can easily be overused.
|
||||
_Why_use_scoped__ptr_? Use plain ***ptr* instead.
|
||||
|
||||
Inline links:
|
||||
|
||||
Links can be included in any text with the form [[url][label]], or
|
||||
[[url]] to use the URL itself as the label.
|
||||
|
||||
Functions:
|
||||
|
||||
A number of template functions are available through invocations
|
||||
in the input text. Each such invocation contains a period as the
|
||||
first character on the line, followed immediately by the name of
|
||||
the function, followed by any arguments. A typical invocation might
|
||||
be
|
||||
.play demo.go /^func show/,/^}/
|
||||
(except that the ".play" must be at the beginning of the line and
|
||||
not be indented like this.)
|
||||
|
||||
Here follows a description of the functions:
|
||||
|
||||
code:
|
||||
|
||||
Injects program source into the output by extracting code from files
|
||||
and injecting them as HTML-escaped <pre> blocks. The argument is
|
||||
a file name followed by an optional address that specifies what
|
||||
section of the file to display. The address syntax is similar in
|
||||
its simplest form to that of ed, but comes from sam and is more
|
||||
general. See
|
||||
https://plan9.io/sys/doc/sam/sam.html Table II
|
||||
for full details. The displayed block is always rounded out to a
|
||||
full line at both ends.
|
||||
|
||||
If no pattern is present, the entire file is displayed.
|
||||
|
||||
Any line in the program that ends with the four characters
|
||||
OMIT
|
||||
is deleted from the source before inclusion, making it easy
|
||||
to write things like
|
||||
.code test.go /START OMIT/,/END OMIT/
|
||||
to find snippets like this
|
||||
tedious_code = boring_function()
|
||||
// START OMIT
|
||||
interesting_code = fascinating_function()
|
||||
// END OMIT
|
||||
and see only this:
|
||||
interesting_code = fascinating_function()
|
||||
|
||||
Also, inside the displayed text a line that ends
|
||||
// HL
|
||||
will be highlighted in the display. A highlighting mark may have a
|
||||
suffix word, such as
|
||||
// HLxxx
|
||||
Such highlights are enabled only if the code invocation ends with
|
||||
"HL" followed by the word:
|
||||
.code test.go /^type Foo/,/^}/ HLxxx
|
||||
|
||||
The .code function may take one or more flags immediately preceding
|
||||
the filename. This command shows test.go in an editable text area:
|
||||
.code -edit test.go
|
||||
This command shows test.go with line numbers:
|
||||
.code -numbers test.go
|
||||
|
||||
play:
|
||||
|
||||
The function "play" is the same as "code" but puts a button
|
||||
on the displayed source so the program can be run from the browser.
|
||||
Although only the selected text is shown, all the source is included
|
||||
in the HTML output so it can be presented to the compiler.
|
||||
|
||||
link:
|
||||
|
||||
Create a hyperlink. The syntax is 1 or 2 space-separated arguments.
|
||||
The first argument is always the HTTP URL. If there is a second
|
||||
argument, it is the text label to display for this link.
|
||||
|
||||
.link http://golang.org golang.org
|
||||
|
||||
image:
|
||||
|
||||
The template uses the function "image" to inject picture files.
|
||||
|
||||
The syntax is simple: 1 or 3 space-separated arguments.
|
||||
The first argument is always the file name.
|
||||
If there are more arguments, they are the height and width;
|
||||
both must be present, or substituted with an underscore.
|
||||
Replacing a dimension argument with the underscore parameter
|
||||
preserves the aspect ratio of the image when scaling.
|
||||
|
||||
.image images/betsy.jpg 100 200
|
||||
|
||||
.image images/janet.jpg _ 300
|
||||
|
||||
video:
|
||||
|
||||
The template uses the function "video" to inject video files.
|
||||
|
||||
The syntax is simple: 2 or 4 space-separated arguments.
|
||||
The first argument is always the file name.
|
||||
The second argument is always the file content-type.
|
||||
If there are more arguments, they are the height and width;
|
||||
both must be present, or substituted with an underscore.
|
||||
Replacing a dimension argument with the underscore parameter
|
||||
preserves the aspect ratio of the video when scaling.
|
||||
|
||||
.video videos/evangeline.mp4 video/mp4 400 600
|
||||
|
||||
.video videos/mabel.ogg video/ogg 500 _
|
||||
|
||||
background:
|
||||
|
||||
The template uses the function "background" to set the background image for
|
||||
a slide. The only argument is the file name of the image.
|
||||
|
||||
.background images/susan.jpg
|
||||
|
||||
caption:
|
||||
|
||||
The template uses the function "caption" to inject figure captions.
|
||||
|
||||
The text after ".caption" is embedded in a figcaption element after
|
||||
processing styling and links as in standard text lines.
|
||||
|
||||
.caption _Gopher_ by [[http://www.reneefrench.com][Renée French]]
|
||||
|
||||
iframe:
|
||||
|
||||
The function "iframe" injects iframes (pages inside pages).
|
||||
Its syntax is the same as that of image.
|
||||
|
||||
html:
|
||||
|
||||
The function html includes the contents of the specified file as
|
||||
unescaped HTML. This is useful for including custom HTML elements
|
||||
that cannot be created using only the slide format.
|
||||
It is your responsibility to make sure the included HTML is valid and safe.
|
||||
|
||||
.html file.html
|
||||
|
||||
Presenter notes:
|
||||
|
||||
Presenter notes may be enabled by appending the "-notes" flag when you run
|
||||
your "present" binary.
|
||||
|
||||
This will allow you to open a second window by pressing 'N' from your browser
|
||||
displaying your slides. The second window is completely synced with your main
|
||||
window, except that presenter notes are only visible on the second window.
|
||||
|
||||
Lines that begin with ": " are treated as presenter notes.
|
||||
|
||||
* Title of slide
|
||||
|
||||
Some Text
|
||||
|
||||
: Presenter notes (first paragraph)
|
||||
: Presenter notes (subsequent paragraph(s))
|
||||
|
||||
Notes may appear anywhere within the slide text. For example:
|
||||
|
||||
* Title of slide
|
||||
|
||||
: Presenter notes (first paragraph)
|
||||
|
||||
Some Text
|
||||
|
||||
: Presenter notes (subsequent paragraph(s))
|
||||
|
||||
This has the same result as the example above.
|
||||
|
||||
*/
|
||||
package present // import "golang.org/x/tools/present"
|
||||
31
vendor/golang.org/x/tools/present/html.go
generated
vendored
Normal file
31
vendor/golang.org/x/tools/present/html.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
package present
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"html/template"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Register("html", parseHTML)
|
||||
}
|
||||
|
||||
func parseHTML(ctx *Context, fileName string, lineno int, text string) (Elem, error) {
|
||||
p := strings.Fields(text)
|
||||
if len(p) != 2 {
|
||||
return nil, errors.New("invalid .html args")
|
||||
}
|
||||
name := filepath.Join(filepath.Dir(fileName), p[1])
|
||||
b, err := ctx.ReadFile(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return HTML{template.HTML(b)}, nil
|
||||
}
|
||||
|
||||
type HTML struct {
|
||||
template.HTML
|
||||
}
|
||||
|
||||
func (s HTML) TemplateName() string { return "html" }
|
||||
48
vendor/golang.org/x/tools/present/iframe.go
generated
vendored
Normal file
48
vendor/golang.org/x/tools/present/iframe.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package present
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Register("iframe", parseIframe)
|
||||
}
|
||||
|
||||
type Iframe struct {
|
||||
URL string
|
||||
Width int
|
||||
Height int
|
||||
}
|
||||
|
||||
func (i Iframe) TemplateName() string { return "iframe" }
|
||||
|
||||
func parseIframe(ctx *Context, fileName string, lineno int, text string) (Elem, error) {
|
||||
args := strings.Fields(text)
|
||||
if len(args) < 2 {
|
||||
return nil, fmt.Errorf("incorrect iframe invocation: %q", text)
|
||||
}
|
||||
i := Iframe{URL: args[1]}
|
||||
a, err := parseArgs(fileName, lineno, args[2:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(a) {
|
||||
case 0:
|
||||
// no size parameters
|
||||
case 2:
|
||||
if v, ok := a[0].(int); ok {
|
||||
i.Height = v
|
||||
}
|
||||
if v, ok := a[1].(int); ok {
|
||||
i.Width = v
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("incorrect iframe invocation: %q", text)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
53
vendor/golang.org/x/tools/present/image.go
generated
vendored
Normal file
53
vendor/golang.org/x/tools/present/image.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package present
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Register("image", parseImage)
|
||||
}
|
||||
|
||||
type Image struct {
|
||||
URL string
|
||||
Width int
|
||||
Height int
|
||||
}
|
||||
|
||||
func (i Image) TemplateName() string { return "image" }
|
||||
|
||||
func parseImage(ctx *Context, fileName string, lineno int, text string) (Elem, error) {
|
||||
args := strings.Fields(text)
|
||||
if len(args) < 2 {
|
||||
return nil, fmt.Errorf("incorrect image invocation: %q", text)
|
||||
}
|
||||
img := Image{URL: args[1]}
|
||||
a, err := parseArgs(fileName, lineno, args[2:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(a) {
|
||||
case 0:
|
||||
// no size parameters
|
||||
case 2:
|
||||
// If a parameter is empty (underscore) or invalid
|
||||
// leave the field set to zero. The "image" action
|
||||
// template will then omit that img tag attribute and
|
||||
// the browser will calculate the value to preserve
|
||||
// the aspect ratio.
|
||||
if v, ok := a[0].(int); ok {
|
||||
img.Height = v
|
||||
}
|
||||
if v, ok := a[1].(int); ok {
|
||||
img.Width = v
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("incorrect image invocation: %q", text)
|
||||
}
|
||||
return img, nil
|
||||
}
|
||||
100
vendor/golang.org/x/tools/present/link.go
generated
vendored
Normal file
100
vendor/golang.org/x/tools/present/link.go
generated
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package present
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Register("link", parseLink)
|
||||
}
|
||||
|
||||
type Link struct {
|
||||
URL *url.URL
|
||||
Label string
|
||||
}
|
||||
|
||||
func (l Link) TemplateName() string { return "link" }
|
||||
|
||||
func parseLink(ctx *Context, fileName string, lineno int, text string) (Elem, error) {
|
||||
args := strings.Fields(text)
|
||||
if len(args) < 2 {
|
||||
return nil, fmt.Errorf("link element must have at least 2 arguments")
|
||||
}
|
||||
url, err := url.Parse(args[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
label := ""
|
||||
if len(args) > 2 {
|
||||
label = strings.Join(args[2:], " ")
|
||||
} else {
|
||||
scheme := url.Scheme + "://"
|
||||
if url.Scheme == "mailto" {
|
||||
scheme = "mailto:"
|
||||
}
|
||||
label = strings.Replace(url.String(), scheme, "", 1)
|
||||
}
|
||||
return Link{url, label}, nil
|
||||
}
|
||||
|
||||
func renderLink(href, text string) string {
|
||||
text = font(text)
|
||||
if text == "" {
|
||||
text = href
|
||||
}
|
||||
// Open links in new window only when their url is absolute.
|
||||
target := "_blank"
|
||||
if u, err := url.Parse(href); err != nil {
|
||||
log.Println("renderLink parsing url:", err)
|
||||
} else if !u.IsAbs() || u.Scheme == "javascript" {
|
||||
target = "_self"
|
||||
}
|
||||
|
||||
return fmt.Sprintf(`<a href="%s" target="%s">%s</a>`, href, target, text)
|
||||
}
|
||||
|
||||
// parseInlineLink parses an inline link at the start of s, and returns
|
||||
// a rendered HTML link and the total length of the raw inline link.
|
||||
// If no inline link is present, it returns all zeroes.
|
||||
func parseInlineLink(s string) (link string, length int) {
|
||||
if !strings.HasPrefix(s, "[[") {
|
||||
return
|
||||
}
|
||||
end := strings.Index(s, "]]")
|
||||
if end == -1 {
|
||||
return
|
||||
}
|
||||
urlEnd := strings.Index(s, "]")
|
||||
rawURL := s[2:urlEnd]
|
||||
const badURLChars = `<>"{}|\^[] ` + "`" // per RFC2396 section 2.4.3
|
||||
if strings.ContainsAny(rawURL, badURLChars) {
|
||||
return
|
||||
}
|
||||
if urlEnd == end {
|
||||
simpleUrl := ""
|
||||
url, err := url.Parse(rawURL)
|
||||
if err == nil {
|
||||
// If the URL is http://foo.com, drop the http://
|
||||
// In other words, render [[http://golang.org]] as:
|
||||
// <a href="http://golang.org">golang.org</a>
|
||||
if strings.HasPrefix(rawURL, url.Scheme+"://") {
|
||||
simpleUrl = strings.TrimPrefix(rawURL, url.Scheme+"://")
|
||||
} else if strings.HasPrefix(rawURL, url.Scheme+":") {
|
||||
simpleUrl = strings.TrimPrefix(rawURL, url.Scheme+":")
|
||||
}
|
||||
}
|
||||
return renderLink(rawURL, simpleUrl), end + 2
|
||||
}
|
||||
if s[urlEnd:urlEnd+2] != "][" {
|
||||
return
|
||||
}
|
||||
text := s[urlEnd+2 : end]
|
||||
return renderLink(rawURL, text), end + 2
|
||||
}
|
||||
568
vendor/golang.org/x/tools/present/parse.go
generated
vendored
Normal file
568
vendor/golang.org/x/tools/present/parse.go
generated
vendored
Normal file
@@ -0,0 +1,568 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package present
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var (
|
||||
parsers = make(map[string]ParseFunc)
|
||||
funcs = template.FuncMap{}
|
||||
)
|
||||
|
||||
// Template returns an empty template with the action functions in its FuncMap.
|
||||
func Template() *template.Template {
|
||||
return template.New("").Funcs(funcs)
|
||||
}
|
||||
|
||||
// Render renders the doc to the given writer using the provided template.
|
||||
func (d *Doc) Render(w io.Writer, t *template.Template) error {
|
||||
data := struct {
|
||||
*Doc
|
||||
Template *template.Template
|
||||
PlayEnabled bool
|
||||
NotesEnabled bool
|
||||
}{d, t, PlayEnabled, NotesEnabled}
|
||||
return t.ExecuteTemplate(w, "root", data)
|
||||
}
|
||||
|
||||
// Render renders the section to the given writer using the provided template.
|
||||
func (s *Section) Render(w io.Writer, t *template.Template) error {
|
||||
data := struct {
|
||||
*Section
|
||||
Template *template.Template
|
||||
PlayEnabled bool
|
||||
}{s, t, PlayEnabled}
|
||||
return t.ExecuteTemplate(w, "section", data)
|
||||
}
|
||||
|
||||
type ParseFunc func(ctx *Context, fileName string, lineNumber int, inputLine string) (Elem, error)
|
||||
|
||||
// Register binds the named action, which does not begin with a period, to the
|
||||
// specified parser to be invoked when the name, with a period, appears in the
|
||||
// present input text.
|
||||
func Register(name string, parser ParseFunc) {
|
||||
if len(name) == 0 || name[0] == ';' {
|
||||
panic("bad name in Register: " + name)
|
||||
}
|
||||
parsers["."+name] = parser
|
||||
}
|
||||
|
||||
// Doc represents an entire document.
|
||||
type Doc struct {
|
||||
Title string
|
||||
Subtitle string
|
||||
Time time.Time
|
||||
Authors []Author
|
||||
TitleNotes []string
|
||||
Sections []Section
|
||||
Tags []string
|
||||
}
|
||||
|
||||
// Author represents the person who wrote and/or is presenting the document.
|
||||
type Author struct {
|
||||
Elem []Elem
|
||||
}
|
||||
|
||||
// TextElem returns the first text elements of the author details.
|
||||
// This is used to display the author' name, job title, and company
|
||||
// without the contact details.
|
||||
func (p *Author) TextElem() (elems []Elem) {
|
||||
for _, el := range p.Elem {
|
||||
if _, ok := el.(Text); !ok {
|
||||
break
|
||||
}
|
||||
elems = append(elems, el)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Section represents a section of a document (such as a presentation slide)
|
||||
// comprising a title and a list of elements.
|
||||
type Section struct {
|
||||
Number []int
|
||||
Title string
|
||||
Elem []Elem
|
||||
Notes []string
|
||||
Classes []string
|
||||
Styles []string
|
||||
}
|
||||
|
||||
// HTMLAttributes for the section
|
||||
func (s Section) HTMLAttributes() template.HTMLAttr {
|
||||
if len(s.Classes) == 0 && len(s.Styles) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
var class string
|
||||
if len(s.Classes) > 0 {
|
||||
class = fmt.Sprintf(`class=%q`, strings.Join(s.Classes, " "))
|
||||
}
|
||||
var style string
|
||||
if len(s.Styles) > 0 {
|
||||
style = fmt.Sprintf(`style=%q`, strings.Join(s.Styles, " "))
|
||||
}
|
||||
return template.HTMLAttr(strings.Join([]string{class, style}, " "))
|
||||
}
|
||||
|
||||
// Sections contained within the section.
|
||||
func (s Section) Sections() (sections []Section) {
|
||||
for _, e := range s.Elem {
|
||||
if section, ok := e.(Section); ok {
|
||||
sections = append(sections, section)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Level returns the level of the given section.
|
||||
// The document title is level 1, main section 2, etc.
|
||||
func (s Section) Level() int {
|
||||
return len(s.Number) + 1
|
||||
}
|
||||
|
||||
// FormattedNumber returns a string containing the concatenation of the
|
||||
// numbers identifying a Section.
|
||||
func (s Section) FormattedNumber() string {
|
||||
b := &bytes.Buffer{}
|
||||
for _, n := range s.Number {
|
||||
fmt.Fprintf(b, "%v.", n)
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (s Section) TemplateName() string { return "section" }
|
||||
|
||||
// Elem defines the interface for a present element. That is, something that
// can provide the name of the template used to render the element.
type Elem interface {
	// TemplateName returns the name of the template that renders the element.
	TemplateName() string
}
|
||||
|
||||
// renderElem implements the elem template function, used to render
// sub-templates.
func renderElem(t *template.Template, e Elem) (template.HTML, error) {
	var data interface{} = e
	if s, ok := e.(Section); ok {
		// Sections additionally need the template set in scope so their
		// template can render nested elements recursively.
		data = struct {
			Section
			Template *template.Template
		}{s, t}
	}
	return execTemplate(t, e.TemplateName(), data)
}
|
||||
|
||||
// pageNum derives a page number from a section.
|
||||
func pageNum(s Section, offset int) int {
|
||||
if len(s.Number) == 0 {
|
||||
return offset
|
||||
}
|
||||
return s.Number[0] + offset
|
||||
}
|
||||
|
||||
// Register the template helper functions defined in this file.
func init() {
	funcs["elem"] = renderElem
	funcs["pagenum"] = pageNum
}
|
||||
|
||||
// execTemplate is a helper to execute a template and return the output as a
|
||||
// template.HTML value.
|
||||
func execTemplate(t *template.Template, name string, data interface{}) (template.HTML, error) {
|
||||
b := new(bytes.Buffer)
|
||||
err := t.ExecuteTemplate(b, name, data)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return template.HTML(b.String()), nil
|
||||
}
|
||||
|
||||
// Text represents an optionally preformatted paragraph.
type Text struct {
	Lines []string // paragraph lines; a single pre-joined block when Pre is set
	Pre   bool     // preformatted: render verbatim (built from indented input)
}
|
||||
|
||||
func (t Text) TemplateName() string { return "text" }
|
||||
|
||||
// List represents a bulleted list.
type List struct {
	Bullet []string // one entry per "- " input line, marker stripped
}
|
||||
|
||||
func (l List) TemplateName() string { return "list" }
|
||||
|
||||
// Lines is a helper for parsing line-based input. It is a cursor over a
// slice of lines, with one-step lookback via back.
type Lines struct {
	line int      // 0 indexed, so has 1-indexed number of last line returned
	text []string // the input, split into lines
}
|
||||
|
||||
func readLines(r io.Reader) (*Lines, error) {
|
||||
var lines []string
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
lines = append(lines, s.Text())
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Lines{0, lines}, nil
|
||||
}
|
||||
|
||||
func (l *Lines) next() (text string, ok bool) {
|
||||
for {
|
||||
current := l.line
|
||||
l.line++
|
||||
if current >= len(l.text) {
|
||||
return "", false
|
||||
}
|
||||
text = l.text[current]
|
||||
// Lines starting with # are comments.
|
||||
if len(text) == 0 || text[0] != '#' {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Lines) back() {
|
||||
l.line--
|
||||
}
|
||||
|
||||
func (l *Lines) nextNonEmpty() (text string, ok bool) {
|
||||
for {
|
||||
text, ok = l.next()
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if len(text) > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// A Context specifies the supporting context for parsing a presentation.
type Context struct {
	// ReadFile reads the file named by filename and returns the contents.
	// The top-level Parse function uses ioutil.ReadFile.
	ReadFile func(filename string) ([]byte, error)
}
|
||||
|
||||
// ParseMode represents flags for the Parse function; values are OR-able bits.
type ParseMode int
|
||||
|
||||
const (
	// TitlesOnly, if set, instructs Parse to stop after the header
	// (title, subtitle, time, tags), skipping authors and sections.
	TitlesOnly ParseMode = 1
)
|
||||
|
||||
// Parse parses a document from r. The name is used only for error messages
// and is forwarded to command parsers.
func (ctx *Context) Parse(r io.Reader, name string, mode ParseMode) (*Doc, error) {
	doc := new(Doc)
	lines, err := readLines(r)
	if err != nil {
		return nil, err
	}

	// Collect speaker notes that appear before the first section heading;
	// they belong to the title slide. This scan does not move the cursor.
	for i := lines.line; i < len(lines.text); i++ {
		if strings.HasPrefix(lines.text[i], "*") {
			break
		}

		if isSpeakerNote(lines.text[i]) {
			doc.TitleNotes = append(doc.TitleNotes, lines.text[i][2:])
		}
	}

	err = parseHeader(doc, lines)
	if err != nil {
		return nil, err
	}
	if mode&TitlesOnly != 0 {
		// Caller asked only for the header; skip authors and sections.
		return doc, nil
	}

	// Authors
	if doc.Authors, err = parseAuthors(lines); err != nil {
		return nil, err
	}
	// Sections (empty, non-nil number slice selects the top level)
	if doc.Sections, err = parseSections(ctx, name, lines, []int{}); err != nil {
		return nil, err
	}
	return doc, nil
}
|
||||
|
||||
// Parse parses a document from r. Parse reads assets used by the presentation
|
||||
// from the file system using ioutil.ReadFile.
|
||||
func Parse(r io.Reader, name string, mode ParseMode) (*Doc, error) {
|
||||
ctx := Context{ReadFile: ioutil.ReadFile}
|
||||
return ctx.Parse(r, name, mode)
|
||||
}
|
||||
|
||||
// isHeading matches any section heading: one or more '*'s followed by a space.
var isHeading = regexp.MustCompile(`^\*+ `)
|
||||
|
||||
// lesserHeading returns true if text is a heading of a lesser or equal level
|
||||
// than that denoted by prefix.
|
||||
func lesserHeading(text, prefix string) bool {
|
||||
return isHeading.MatchString(text) && !strings.HasPrefix(text, prefix+"*")
|
||||
}
|
||||
|
||||
// parseSections parses Sections from lines for the section level indicated by
// number (a nil number indicates the top level). It consumes input up to, but
// not including, the first heading of a lesser or equal level, and recurses
// for deeper headings.
func parseSections(ctx *Context, name string, lines *Lines, number []int) ([]Section, error) {
	var sections []Section
	for i := 1; ; i++ {
		// Next non-empty line is title.
		text, ok := lines.nextNonEmpty()
		for ok && text == "" {
			text, ok = lines.next()
		}
		if !ok {
			break
		}
		// A heading at this level is len(number)+1 stars plus a space.
		prefix := strings.Repeat("*", len(number)+1)
		if !strings.HasPrefix(text, prefix+" ") {
			// Not a heading at this level; leave it for the caller.
			lines.back()
			break
		}
		section := Section{
			// Copy number to avoid aliasing the caller's slice across siblings.
			Number: append(append([]int{}, number...), i),
			Title:  text[len(prefix)+1:],
		}
		// Consume the section body until a heading of lesser/equal level.
		text, ok = lines.nextNonEmpty()
		for ok && !lesserHeading(text, prefix) {
			var e Elem
			r, _ := utf8.DecodeRuneInString(text)
			switch {
			case unicode.IsSpace(r):
				// Indented block: preformatted text. i is the byte width of
				// the indent, stripped from each line of the block.
				i := strings.IndexFunc(text, func(r rune) bool {
					return !unicode.IsSpace(r)
				})
				if i < 0 {
					break
				}
				indent := text[:i]
				var s []string
				for ok && (strings.HasPrefix(text, indent) || text == "") {
					if text != "" {
						text = text[i:]
					}
					s = append(s, text)
					text, ok = lines.next()
				}
				lines.back()
				pre := strings.Join(s, "\n")
				pre = strings.Replace(pre, "\t", "    ", -1) // browsers treat tabs badly
				pre = strings.TrimRightFunc(pre, unicode.IsSpace)
				e = Text{Lines: []string{pre}, Pre: true}
			case strings.HasPrefix(text, "- "):
				// Run of "- " lines forms a bulleted list.
				var b []string
				for ok && strings.HasPrefix(text, "- ") {
					b = append(b, text[2:])
					text, ok = lines.next()
				}
				lines.back()
				e = List{Bullet: b}
			case isSpeakerNote(text):
				// ": " lines accumulate as speaker notes, not elements.
				section.Notes = append(section.Notes, text[2:])
			case strings.HasPrefix(text, prefix+"* "):
				// Deeper heading: recurse, then splice subsections in as elements.
				lines.back()
				subsecs, err := parseSections(ctx, name, lines, section.Number)
				if err != nil {
					return nil, err
				}
				for _, ss := range subsecs {
					section.Elem = append(section.Elem, ss)
				}
			case strings.HasPrefix(text, "."):
				// Command invocation: ".background" is handled inline, everything
				// else dispatches to a parser registered via Register.
				args := strings.Fields(text)
				if args[0] == ".background" {
					section.Classes = append(section.Classes, "background")
					section.Styles = append(section.Styles, "background-image: url('"+args[1]+"')")
					break
				}
				parser := parsers[args[0]]
				if parser == nil {
					return nil, fmt.Errorf("%s:%d: unknown command %q\n", name, lines.line, text)
				}
				t, err := parser(ctx, name, lines.line, text)
				if err != nil {
					return nil, err
				}
				e = t
			default:
				// Plain paragraph: gather lines until a blank line or a command.
				var l []string
				for ok && strings.TrimSpace(text) != "" {
					if text[0] == '.' { // Command breaks text block.
						lines.back()
						break
					}
					if strings.HasPrefix(text, `\.`) { // Backslash escapes initial period.
						text = text[1:]
					}
					l = append(l, text)
					text, ok = lines.next()
				}
				if len(l) > 0 {
					e = Text{Lines: l}
				}
			}
			if e != nil {
				section.Elem = append(section.Elem, e)
			}
			text, ok = lines.nextNonEmpty()
		}
		// Push the terminating heading back so the caller (or next iteration)
		// sees it.
		if isHeading.MatchString(text) {
			lines.back()
		}
		sections = append(sections, section)
	}
	return sections, nil
}
|
||||
|
||||
// parseHeader populates doc's Title, Subtitle, Time, and Tags from the
// header block, which runs from the first non-empty line to the first
// blank line. Speaker-note lines are skipped. An unclassifiable second
// extra line is an error.
func parseHeader(doc *Doc, lines *Lines) error {
	var ok bool
	// First non-empty line starts header.
	doc.Title, ok = lines.nextNonEmpty()
	if !ok {
		return errors.New("unexpected EOF; expected title")
	}
	for {
		text, ok := lines.next()
		if !ok {
			return errors.New("unexpected EOF")
		}
		if text == "" {
			// Blank line terminates the header.
			break
		}
		if isSpeakerNote(text) {
			continue
		}
		const tagPrefix = "Tags:"
		if strings.HasPrefix(text, tagPrefix) {
			// Comma-separated tags, whitespace-trimmed.
			tags := strings.Split(text[len(tagPrefix):], ",")
			for i := range tags {
				tags[i] = strings.TrimSpace(tags[i])
			}
			doc.Tags = append(doc.Tags, tags...)
		} else if t, ok := parseTime(text); ok {
			doc.Time = t
		} else if doc.Subtitle == "" {
			doc.Subtitle = text
		} else {
			return fmt.Errorf("unexpected header line: %q", text)
		}
	}
	return nil
}
|
||||
|
||||
// parseAuthors reads author blocks from lines, stopping at the first
// section heading. Authors are separated by blank lines; each non-blank
// line becomes one element of the author's Elem slice.
func parseAuthors(lines *Lines) (authors []Author, err error) {
	// This grammar demarcates authors with blanks.

	// Skip blank lines.
	if _, ok := lines.nextNonEmpty(); !ok {
		return nil, errors.New("unexpected EOF")
	}
	lines.back()

	var a *Author
	for {
		text, ok := lines.next()
		if !ok {
			return nil, errors.New("unexpected EOF")
		}

		// If we find a section heading, we're done.
		if strings.HasPrefix(text, "* ") {
			lines.back()
			break
		}

		if isSpeakerNote(text) {
			continue
		}

		// If we encounter a blank we're done with this author.
		if a != nil && len(text) == 0 {
			authors = append(authors, *a)
			a = nil
			continue
		}
		if a == nil {
			a = new(Author)
		}

		// Parse the line. Those that
		// - begin with @ are twitter names,
		// - contain slashes are links, or
		// - contain an @ symbol are an email address.
		// The rest is just text.
		var el Elem
		switch {
		case strings.HasPrefix(text, "@"):
			el = parseURL("http://twitter.com/" + text[1:])
		case strings.Contains(text, ":"):
			el = parseURL(text)
		case strings.Contains(text, "@"):
			el = parseURL("mailto:" + text)
		}
		if l, ok := el.(Link); ok {
			// Show the original line as the link label.
			l.Label = text
			el = l
		}
		if el == nil {
			// parseURL failed or no pattern matched: keep the raw text.
			el = Text{Lines: []string{text}}
		}
		a.Elem = append(a.Elem, el)
	}
	// Flush the final author, which has no trailing blank line.
	if a != nil {
		authors = append(authors, *a)
	}
	return authors, nil
}
|
||||
|
||||
func parseURL(text string) Elem {
|
||||
u, err := url.Parse(text)
|
||||
if err != nil {
|
||||
log.Printf("Parse(%q): %v", text, err)
|
||||
return nil
|
||||
}
|
||||
return Link{URL: u}
|
||||
}
|
||||
|
||||
// parseTime interprets text as a presentation date, accepting either the
// "15:04 2 Jan 2006" or the "2 Jan 2006" layout. Date-only values are
// shifted to 11:00 so the calendar date is the same in every time zone.
func parseTime(text string) (time.Time, bool) {
	if t, err := time.Parse("15:04 2 Jan 2006", text); err == nil {
		return t, true
	}
	t, err := time.Parse("2 Jan 2006", text)
	if err != nil {
		return time.Time{}, false
	}
	// at 11am UTC it is the same date everywhere
	return t.Add(11 * time.Hour), true
}
|
||||
|
||||
// isSpeakerNote reports whether s is a speaker-note line, marked by a
// leading ": " prefix.
func isSpeakerNote(s string) bool {
	return len(s) >= 2 && s[0] == ':' && s[1] == ' '
}
|
||||
167
vendor/golang.org/x/tools/present/style.go
generated
vendored
Normal file
167
vendor/golang.org/x/tools/present/style.go
generated
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package present
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"html"
|
||||
"html/template"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
/*
|
||||
Fonts are demarcated by an initial and final char bracketing a
|
||||
space-delimited word, plus possibly some terminal punctuation.
|
||||
The chars are
|
||||
_ for italic
|
||||
* for bold
|
||||
` (back quote) for fixed width.
|
||||
Inner appearances of the char become spaces. For instance,
|
||||
_this_is_italic_!
|
||||
becomes
|
||||
<i>this is italic</i>!
|
||||
*/
|
||||
|
||||
// Register the "style" template helper.
func init() {
	funcs["style"] = Style
}
|
||||
|
||||
// Style returns s with HTML entities escaped and font indicators turned into
|
||||
// HTML font tags.
|
||||
func Style(s string) template.HTML {
|
||||
return template.HTML(font(html.EscapeString(s)))
|
||||
}
|
||||
|
||||
// font returns s with font indicators turned into HTML font tags.
// It works word by word (as produced by split): a marker character
// ('_', '*', or '`') bracketing a word turns it into <i>, <b>, or
// <code> respectively; inner occurrences of the marker become spaces,
// and doubled markers become the literal character.
func font(s string) string {
	// Fast path: no marker or link-opening character anywhere.
	if !strings.ContainsAny(s, "[`_*") {
		return s
	}
	words := split(s)
	var b bytes.Buffer
Word:
	for w, word := range words {
		if len(word) < 2 {
			continue Word
		}
		// Inline links ([[url][label]]) are replaced whole.
		if link, _ := parseInlineLink(word); link != "" {
			words[w] = link
			continue Word
		}
		const marker = "_*`"
		// Initial punctuation is OK but must be peeled off.
		first := strings.IndexAny(word, marker)
		if first == -1 {
			continue Word
		}
		// Opening marker must be at the beginning of the token or else preceded by punctuation.
		if first != 0 {
			r, _ := utf8.DecodeLastRuneInString(word[:first])
			if !unicode.IsPunct(r) {
				continue Word
			}
		}
		// open holds the peeled-off prefix plus the opening tag.
		open, word := word[:first], word[first:]
		char := word[0] // ASCII is OK.
		close := ""
		switch char {
		default:
			continue Word
		case '_':
			open += "<i>"
			close = "</i>"
		case '*':
			open += "<b>"
			close = "</b>"
		case '`':
			open += "<code>"
			close = "</code>"
		}
		// Closing marker must be at the end of the token or else followed by punctuation.
		last := strings.LastIndex(word, word[:1])
		if last == 0 {
			// No closing marker distinct from the opener.
			continue Word
		}
		if last+1 != len(word) {
			r, _ := utf8.DecodeRuneInString(word[last+1:])
			if !unicode.IsPunct(r) {
				continue Word
			}
		}
		head, tail := word[:last+1], word[last+1:]
		b.Reset()
		b.WriteString(open)
		var wid int
		// Walk the runes strictly between the two markers.
		for i := 1; i < len(head)-1; i += wid {
			var r rune
			r, wid = utf8.DecodeRuneInString(head[i:])
			if r != rune(char) {
				// Ordinary character.
				b.WriteRune(r)
				continue
			}
			if head[i+1] != char {
				// Inner char becomes space.
				b.WriteRune(' ')
				continue
			}
			// Doubled char becomes real char.
			// Not worth worrying about "_x__".
			b.WriteByte(char)
			wid++ // Consumed two chars, both ASCII.
		}
		b.WriteString(close) // Write closing tag.
		b.WriteString(tail)  // Restore trailing punctuation.
		words[w] = b.String()
	}
	// split preserved the whitespace runs, so a plain join restores them.
	return strings.Join(words, "")
}
|
||||
|
||||
// split is like strings.Fields but also returns the runs of spaces
// and treats inline links as distinct words. Joining the result with ""
// reproduces s exactly.
func split(s string) []string {
	var (
		words = make([]string, 0, 10)
		start = 0
	)

	// appendWord appends the string s[start:end] to the words slice.
	// If the word contains the beginning of a link, the non-link portion
	// of the word and the entire link are appended as separate words,
	// and the start index is advanced to the end of the link.
	appendWord := func(end int) {
		if j := strings.Index(s[start:end], "[["); j > -1 {
			if _, l := parseInlineLink(s[start+j:]); l > 0 {
				// Append portion before link, if any.
				if j > 0 {
					words = append(words, s[start:start+j])
				}
				// Append link itself.
				words = append(words, s[start+j:start+j+l])
				// Advance start index to end of link.
				start = start + j + l
				return
			}
		}
		// No link; just add the word.
		words = append(words, s[start:end])
		start = end
	}

	// Emit a word at every space/non-space boundary.
	wasSpace := false
	for i, r := range s {
		isSpace := unicode.IsSpace(r)
		if i > start && isSpace != wasSpace {
			appendWord(i)
		}
		wasSpace = isSpace
	}
	// Flush the remainder; loops because appendWord may stop at a link
	// boundary without consuming everything.
	for start < len(s) {
		appendWord(len(s))
	}
	return words
}
|
||||
54
vendor/golang.org/x/tools/present/video.go
generated
vendored
Normal file
54
vendor/golang.org/x/tools/present/video.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package present
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Register the ".video" present command.
func init() {
	Register("video", parseVideo)
}
|
||||
|
||||
// Video represents an embedded video, produced by the ".video" command.
type Video struct {
	URL        string // first command argument
	SourceType string // second command argument (the video source type)
	Width      int    // optional; zero means omitted, browser preserves aspect ratio
	Height     int    // optional; zero means omitted, browser preserves aspect ratio
}
|
||||
|
||||
func (v Video) TemplateName() string { return "video" }
|
||||
|
||||
// parseVideo parses a ".video URL source-type [height width]" command line
// into a Video element. It is registered with Register in this file's init.
func parseVideo(ctx *Context, fileName string, lineno int, text string) (Elem, error) {
	args := strings.Fields(text)
	if len(args) < 3 {
		return nil, fmt.Errorf("incorrect video invocation: %q", text)
	}
	vid := Video{URL: args[1], SourceType: args[2]}
	// Remaining arguments are the optional size parameters.
	a, err := parseArgs(fileName, lineno, args[3:])
	if err != nil {
		return nil, err
	}
	switch len(a) {
	case 0:
		// no size parameters
	case 2:
		// If a parameter is empty (underscore) or invalid
		// leave the field set to zero. The "video" action
		// template will then omit that vid tag attribute and
		// the browser will calculate the value to preserve
		// the aspect ratio.
		if v, ok := a[0].(int); ok {
			vid.Height = v
		}
		if v, ok := a[1].(int); ok {
			vid.Width = v
		}
	default:
		// Any other count of size parameters is malformed.
		return nil, fmt.Errorf("incorrect video invocation: %q", text)
	}
	return vid, nil
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user