Upgrade Go extractor compiler and dependency versions

Chris Smowton
2023-02-14 17:34:47 +00:00
parent 029e1d47fe
commit 9e584eb241
41 changed files with 1208 additions and 1290 deletions

View File

@@ -3,11 +3,11 @@ module github.com/github/codeql-go
go 1.18 go 1.18
require ( require (
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 golang.org/x/mod v0.8.0
golang.org/x/tools v0.1.12 golang.org/x/tools v0.6.0
) )
require ( require (
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect golang.org/x/sys v0.5.0 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
) )

View File

@@ -6,6 +6,8 @@ golang.org/x/mod v0.5.0 h1:UG21uOlmZabA4fW5i7ZX6bjw1xELEGg/ZLgZq9auk/Q=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
@@ -19,6 +21,8 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0v
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -28,6 +32,8 @@ golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=

View File

@@ -494,7 +494,7 @@ func (in *input) endToken(kind tokenKind) {
in.token.endPos = in.pos in.token.endPos = in.pos
} }
// peek returns the kind of the the next token returned by lex. // peek returns the kind of the next token returned by lex.
func (in *input) peek() tokenKind { func (in *input) peek() tokenKind {
return in.token.kind return in.token.kind
} }

View File

@@ -513,6 +513,9 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V
nv := "" nv := ""
if len(args) == arrow+2 { if len(args) == arrow+2 {
if !IsDirectoryPath(ns) { if !IsDirectoryPath(ns) {
if strings.Contains(ns, "@") {
return nil, errorf("replacement module must match format 'path version', not 'path@version'")
}
return nil, errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)") return nil, errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)")
} }
if filepath.Separator == '/' && strings.Contains(ns, `\`) { if filepath.Separator == '/' && strings.Contains(ns, `\`) {
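
Editor's note, not part of the diff: the new check above means golang.org/x/mod/modfile now rejects a replace target written as "path@version" with a dedicated error rather than the generic directory-path message. A minimal sketch of how that surfaces to a caller of modfile.Parse; the module paths are made up.

package main

import (
	"fmt"

	"golang.org/x/mod/modfile"
)

func main() {
	src := []byte(`module example.com/demo

go 1.18

replace example.com/old => example.com/new@v1.2.3
`)
	// Parse applies the parseReplace validation shown above; the error
	// explains the expected "path version" form.
	_, err := modfile.Parse("go.mod", src, nil)
	fmt.Println(err)
}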

View File

@@ -96,13 +96,13 @@ package module
// Changes to the semantics in this file require approval from rsc. // Changes to the semantics in this file require approval from rsc.
import ( import (
"errors"
"fmt" "fmt"
"path" "path"
"sort" "sort"
"strings" "strings"
"unicode" "unicode"
"unicode/utf8" "unicode/utf8"
"errors"
"golang.org/x/mod/semver" "golang.org/x/mod/semver"
) )
@@ -258,7 +258,7 @@ func modPathOK(r rune) bool {
return false return false
} }
// modPathOK reports whether r can appear in a package import path element. // importPathOK reports whether r can appear in a package import path element.
// //
// Import paths are intermediate between module paths and file paths: we allow // Import paths are intermediate between module paths and file paths: we allow
// disallow characters that would be confusing or ambiguous as arguments to // disallow characters that would be confusing or ambiguous as arguments to

View File

@@ -7,9 +7,11 @@
package execabs package execabs
import "strings" import (
"errors"
"os/exec"
)
func isGo119ErrDot(err error) bool { func isGo119ErrDot(err error) bool {
// TODO: return errors.Is(err, exec.ErrDot) return errors.Is(err, exec.ErrDot)
return strings.Contains(err.Error(), "current directory")
} }
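
Editor's note, not part of the diff: with the toolchain upgraded past Go 1.19, the execabs shim can rely on exec.ErrDot directly instead of matching the error string. A small sketch of the behaviour it now detects; "prog" is a hypothetical binary assumed to resolve via a "." entry in PATH.

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("prog") // resolution via the current directory sets an error wrapping exec.ErrDot
	if err := cmd.Run(); errors.Is(err, exec.ErrDot) {
		fmt.Println("refusing to run a binary found relative to the current directory")
	}
}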

View File

@@ -27,10 +27,9 @@ import (
"go/token" "go/token"
"go/types" "go/types"
"io" "io"
"io/ioutil"
"os/exec" "os/exec"
"golang.org/x/tools/go/internal/gcimporter" "golang.org/x/tools/internal/gcimporter"
) )
// Find returns the name of an object (.o) or archive (.a) file // Find returns the name of an object (.o) or archive (.a) file
@@ -85,9 +84,26 @@ func NewReader(r io.Reader) (io.Reader, error) {
} }
} }
// readAll works the same way as io.ReadAll, but avoids allocations and copies
// by preallocating a byte slice of the necessary size if the size is known up
// front. This is always possible when the input is an archive. In that case,
// NewReader will return the known size using an io.LimitedReader.
func readAll(r io.Reader) ([]byte, error) {
if lr, ok := r.(*io.LimitedReader); ok {
data := make([]byte, lr.N)
_, err := io.ReadFull(lr, data)
return data, err
}
return io.ReadAll(r)
}
// Read reads export data from in, decodes it, and returns type // Read reads export data from in, decodes it, and returns type
// information for the package. // information for the package.
// The package name is specified by path. //
// The package path (effectively its linker symbol prefix) is
// specified by path, since unlike the package name, this information
// may not be recorded in the export data.
//
// File position information is added to fset. // File position information is added to fset.
// //
// Read may inspect and add to the imports map to ensure that references // Read may inspect and add to the imports map to ensure that references
@@ -98,7 +114,7 @@ func NewReader(r io.Reader) (io.Reader, error) {
// //
// On return, the state of the reader is undefined. // On return, the state of the reader is undefined.
func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
data, err := ioutil.ReadAll(in) data, err := readAll(in)
if err != nil { if err != nil {
return nil, fmt.Errorf("reading export data for %q: %v", path, err) return nil, fmt.Errorf("reading export data for %q: %v", path, err)
} }
@@ -107,12 +123,6 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
} }
// The App Engine Go runtime v1.6 uses the old export data format.
// TODO(adonovan): delete once v1.7 has been around for a while.
if bytes.HasPrefix(data, []byte("package ")) {
return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
}
// The indexed export format starts with an 'i'; the older // The indexed export format starts with an 'i'; the older
// binary export format starts with a 'c', 'd', or 'v' // binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer. // (from "version"). Select appropriate importer.
@@ -161,7 +171,7 @@ func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
// //
// Experimental: This API is experimental and may change in the future. // Experimental: This API is experimental and may change in the future.
func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) {
data, err := ioutil.ReadAll(in) data, err := readAll(in)
if err != nil { if err != nil {
return nil, fmt.Errorf("reading export bundle: %v", err) return nil, fmt.Errorf("reading export bundle: %v", err)
} }
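
Editor's note, not part of the diff: the doc change above stresses that Read takes the package path (not its name), because the name may be missing from export data. A hedged end-to-end sketch of the gcexportdata API; "fmt" is just an arbitrary example package.

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// For archives, NewReader returns an io.LimitedReader with a known size,
	// which is what lets the new readAll helper preallocate its buffer.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}

	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), pkg.Name())
}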

File diff suppressed because it is too large

View File

@@ -60,6 +60,7 @@ func (r *responseDeduper) addAll(dr *driverResponse) {
for _, root := range dr.Roots { for _, root := range dr.Roots {
r.addRoot(root) r.addRoot(root)
} }
r.dr.GoVersion = dr.GoVersion
} }
func (r *responseDeduper) addPackage(p *Package) { func (r *responseDeduper) addPackage(p *Package) {
@@ -454,11 +455,14 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
if err != nil { if err != nil {
return nil, err return nil, err
} }
seen := make(map[string]*jsonPackage) seen := make(map[string]*jsonPackage)
pkgs := make(map[string]*Package) pkgs := make(map[string]*Package)
additionalErrors := make(map[string][]Error) additionalErrors := make(map[string][]Error)
// Decode the JSON and convert it to Package form. // Decode the JSON and convert it to Package form.
var response driverResponse response := &driverResponse{
GoVersion: goVersion,
}
for dec := json.NewDecoder(buf); dec.More(); { for dec := json.NewDecoder(buf); dec.More(); {
p := new(jsonPackage) p := new(jsonPackage)
if err := dec.Decode(p); err != nil { if err := dec.Decode(p); err != nil {
@@ -600,17 +604,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
// Work around https://golang.org/issue/28749: // Work around https://golang.org/issue/28749:
// cmd/go puts assembly, C, and C++ files in CompiledGoFiles. // cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
// Filter out any elements of CompiledGoFiles that are also in OtherFiles. // Remove files from CompiledGoFiles that are non-go files
// We have to keep this workaround in place until go1.12 is a distant memory. // (or are not files that look like they are from the cache).
if len(pkg.OtherFiles) > 0 { if len(pkg.CompiledGoFiles) > 0 {
other := make(map[string]bool, len(pkg.OtherFiles))
for _, f := range pkg.OtherFiles {
other[f] = true
}
out := pkg.CompiledGoFiles[:0] out := pkg.CompiledGoFiles[:0]
for _, f := range pkg.CompiledGoFiles { for _, f := range pkg.CompiledGoFiles {
if other[f] { if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file
continue continue
} }
out = append(out, f) out = append(out, f)
@@ -730,7 +729,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
} }
sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
return &response, nil return response, nil
} }
func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
@@ -756,6 +755,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath
} }
// getGoVersion returns the effective minor version of the go command.
func (state *golistState) getGoVersion() (int, error) { func (state *golistState) getGoVersion() (int, error) {
state.goVersionOnce.Do(func() { state.goVersionOnce.Do(func() {
state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
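
Editor's note, not part of the diff: getGoVersion above asks the go command for its effective minor version, which the new GoVersion plumbing carries into driverResponse. A hedged sketch of obtaining the same information directly; it assumes a go command on PATH that supports "go env GOVERSION" (Go 1.16+).

package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func main() {
	out, err := exec.Command("go", "env", "GOVERSION").Output()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(strings.TrimSpace(string(out))) // e.g. "go1.19.5"
}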

View File

@@ -15,10 +15,12 @@ import (
"go/scanner" "go/scanner"
"go/token" "go/token"
"go/types" "go/types"
"io"
"io/ioutil" "io/ioutil"
"log" "log"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"strings" "strings"
"sync" "sync"
"time" "time"
@@ -233,6 +235,11 @@ type driverResponse struct {
// Imports will be connected and then type and syntax information added in a // Imports will be connected and then type and syntax information added in a
// later pass (see refine). // later pass (see refine).
Packages []*Package Packages []*Package
// GoVersion is the minor version number used by the driver
// (e.g. the go command on the PATH) when selecting .go files.
// Zero means unknown.
GoVersion int
} }
// Load loads and returns the Go packages named by the given patterns. // Load loads and returns the Go packages named by the given patterns.
@@ -256,7 +263,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) {
return nil, err return nil, err
} }
l.sizes = response.Sizes l.sizes = response.Sizes
return l.refine(response.Roots, response.Packages...) return l.refine(response)
} }
// defaultDriver is a driver that implements go/packages' fallback behavior. // defaultDriver is a driver that implements go/packages' fallback behavior.
@@ -297,6 +304,9 @@ type Package struct {
// of the package, or while parsing or type-checking its files. // of the package, or while parsing or type-checking its files.
Errors []Error Errors []Error
// TypeErrors contains the subset of errors produced during type checking.
TypeErrors []types.Error
// GoFiles lists the absolute file paths of the package's Go source files. // GoFiles lists the absolute file paths of the package's Go source files.
GoFiles []string GoFiles []string
@@ -532,6 +542,7 @@ type loaderPackage struct {
needsrc bool // load from source (Mode >= LoadTypes) needsrc bool // load from source (Mode >= LoadTypes)
needtypes bool // type information is either requested or depended on needtypes bool // type information is either requested or depended on
initial bool // package was matched by a pattern initial bool // package was matched by a pattern
goVersion int // minor version number of go command on PATH
} }
// loader holds the working state of a single call to load. // loader holds the working state of a single call to load.
@@ -618,7 +629,8 @@ func newLoader(cfg *Config) *loader {
// refine connects the supplied packages into a graph and then adds type and // refine connects the supplied packages into a graph and then adds type and
// and syntax information as requested by the LoadMode. // and syntax information as requested by the LoadMode.
func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { func (ld *loader) refine(response *driverResponse) ([]*Package, error) {
roots := response.Roots
rootMap := make(map[string]int, len(roots)) rootMap := make(map[string]int, len(roots))
for i, root := range roots { for i, root := range roots {
rootMap[root] = i rootMap[root] = i
@@ -626,7 +638,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
ld.pkgs = make(map[string]*loaderPackage) ld.pkgs = make(map[string]*loaderPackage)
// first pass, fixup and build the map and roots // first pass, fixup and build the map and roots
var initial = make([]*loaderPackage, len(roots)) var initial = make([]*loaderPackage, len(roots))
for _, pkg := range list { for _, pkg := range response.Packages {
rootIndex := -1 rootIndex := -1
if i, found := rootMap[pkg.ID]; found { if i, found := rootMap[pkg.ID]; found {
rootIndex = i rootIndex = i
@@ -648,6 +660,7 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
Package: pkg, Package: pkg,
needtypes: needtypes, needtypes: needtypes,
needsrc: needsrc, needsrc: needsrc,
goVersion: response.GoVersion,
} }
ld.pkgs[lpkg.ID] = lpkg ld.pkgs[lpkg.ID] = lpkg
if rootIndex >= 0 { if rootIndex >= 0 {
@@ -865,12 +878,19 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
// never has to create a types.Package for an indirect dependency, // never has to create a types.Package for an indirect dependency,
// which would then require that such created packages be explicitly // which would then require that such created packages be explicitly
// inserted back into the Import graph as a final step after export data loading. // inserted back into the Import graph as a final step after export data loading.
// (Hence this return is after the Types assignment.)
// The Diamond test exercises this case. // The Diamond test exercises this case.
if !lpkg.needtypes && !lpkg.needsrc { if !lpkg.needtypes && !lpkg.needsrc {
return return
} }
if !lpkg.needsrc { if !lpkg.needsrc {
ld.loadFromExportData(lpkg) if err := ld.loadFromExportData(lpkg); err != nil {
lpkg.Errors = append(lpkg.Errors, Error{
Pos: "-",
Msg: err.Error(),
Kind: UnknownError, // e.g. can't find/open/parse export data
})
}
return // not a source package, don't get syntax trees return // not a source package, don't get syntax trees
} }
@@ -902,6 +922,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
case types.Error: case types.Error:
// from type checker // from type checker
lpkg.TypeErrors = append(lpkg.TypeErrors, err)
errs = append(errs, Error{ errs = append(errs, Error{
Pos: err.Fset.Position(err.Pos).String(), Pos: err.Fset.Position(err.Pos).String(),
Msg: err.Msg, Msg: err.Msg,
@@ -923,11 +944,41 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
lpkg.Errors = append(lpkg.Errors, errs...) lpkg.Errors = append(lpkg.Errors, errs...)
} }
// If the go command on the PATH is newer than the runtime,
// then the go/{scanner,ast,parser,types} packages from the
// standard library may be unable to process the files
// selected by go list.
//
// There is currently no way to downgrade the effective
// version of the go command (see issue 52078), so we proceed
// with the newer go command but, in case of parse or type
// errors, we emit an additional diagnostic.
//
// See:
// - golang.org/issue/52078 (flag to set release tags)
// - golang.org/issue/50825 (gopls legacy version support)
// - golang.org/issue/55883 (go/packages confusing error)
//
// Should we assert a hard minimum of (currently) go1.16 here?
var runtimeVersion int
if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion {
defer func() {
if len(lpkg.Errors) > 0 {
appendError(Error{
Pos: "-",
Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion),
Kind: UnknownError,
})
}
}()
}
if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" {
// The config requested loading sources and types, but sources are missing. // The config requested loading sources and types, but sources are missing.
// Add an error to the package and fall back to loading from export data. // Add an error to the package and fall back to loading from export data.
appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError})
ld.loadFromExportData(lpkg) _ = ld.loadFromExportData(lpkg) // ignore any secondary errors
return // can't get syntax trees for this package return // can't get syntax trees for this package
} }
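
Editor's note, not part of the diff: the mismatch warning above hinges on parsing runtime.Version() with fmt.Sscanf. A small illustration of that parse; the printed values are examples.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	var minor int
	if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &minor); err == nil {
		fmt.Println("runtime minor version:", minor) // e.g. 19 for "go1.19.5"
	} else {
		fmt.Println("non-release toolchain:", runtime.Version()) // e.g. a devel build
	}
}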
@@ -981,7 +1032,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
tc := &types.Config{ tc := &types.Config{
Importer: importer, Importer: importer,
// Type-check bodies of functions only in non-initial packages. // Type-check bodies of functions only in initial packages.
// Example: for import graph A->B->C and initial packages {A,C}, // Example: for import graph A->B->C and initial packages {A,C},
// we can ignore function bodies in B. // we can ignore function bodies in B.
IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
@@ -1151,9 +1202,10 @@ func sameFile(x, y string) bool {
return false return false
} }
// loadFromExportData returns type information for the specified // loadFromExportData ensures that type information is present for the specified
// package, loading it from an export data file on the first request. // package, loading it from an export data file on the first request.
func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) { // On success it sets lpkg.Types to a new Package.
func (ld *loader) loadFromExportData(lpkg *loaderPackage) error {
if lpkg.PkgPath == "" { if lpkg.PkgPath == "" {
log.Fatalf("internal error: Package %s has no PkgPath", lpkg) log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
} }
@@ -1164,8 +1216,8 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
// must be sequential. (Finer-grained locking would require // must be sequential. (Finer-grained locking would require
// changes to the gcexportdata API.) // changes to the gcexportdata API.)
// //
// The exportMu lock guards the Package.Pkg field and the // The exportMu lock guards the lpkg.Types field and the
// types.Package it points to, for each Package in the graph. // types.Package it points to, for each loaderPackage in the graph.
// //
// Not all accesses to Package.Pkg need to be protected by exportMu: // Not all accesses to Package.Pkg need to be protected by exportMu:
// graph ordering ensures that direct dependencies of source // graph ordering ensures that direct dependencies of source
@@ -1174,18 +1226,18 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
defer ld.exportMu.Unlock() defer ld.exportMu.Unlock()
if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
return tpkg, nil // cache hit return nil // cache hit
} }
lpkg.IllTyped = true // fail safe lpkg.IllTyped = true // fail safe
if lpkg.ExportFile == "" { if lpkg.ExportFile == "" {
// Errors while building export data will have been printed to stderr. // Errors while building export data will have been printed to stderr.
return nil, fmt.Errorf("no export data file") return fmt.Errorf("no export data file")
} }
f, err := os.Open(lpkg.ExportFile) f, err := os.Open(lpkg.ExportFile)
if err != nil { if err != nil {
return nil, err return err
} }
defer f.Close() defer f.Close()
@@ -1197,7 +1249,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
// queries.) // queries.)
r, err := gcexportdata.NewReader(f) r, err := gcexportdata.NewReader(f)
if err != nil { if err != nil {
return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
} }
// Build the view. // Build the view.
@@ -1241,7 +1293,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
// (May modify incomplete packages in view but not create new ones.) // (May modify incomplete packages in view but not create new ones.)
tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
} }
if _, ok := view["go.shape"]; ok { if _, ok := view["go.shape"]; ok {
// Account for the pseudopackage "go.shape" that gets // Account for the pseudopackage "go.shape" that gets
@@ -1254,8 +1306,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
lpkg.Types = tpkg lpkg.Types = tpkg
lpkg.IllTyped = false lpkg.IllTyped = false
return nil
return tpkg, nil
} }
// impliedLoadMode returns loadMode with its dependencies. // impliedLoadMode returns loadMode with its dependencies.
@@ -1271,3 +1322,5 @@ func impliedLoadMode(loadMode LoadMode) LoadMode {
func usesExportData(cfg *Config) bool { func usesExportData(cfg *Config) bool {
return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
} }
var _ interface{} = io.Discard // assert build toolchain is go1.16 or later

View File

@@ -12,7 +12,6 @@ import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"go/ast"
"go/constant" "go/constant"
"go/token" "go/token"
"go/types" "go/types"
@@ -145,7 +144,7 @@ func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error)
objcount := 0 objcount := 0
scope := pkg.Scope() scope := pkg.Scope()
for _, name := range scope.Names() { for _, name := range scope.Names() {
if !ast.IsExported(name) { if !token.IsExported(name) {
continue continue
} }
if trace { if trace {
@@ -482,7 +481,7 @@ func (p *exporter) method(m *types.Func) {
p.pos(m) p.pos(m)
p.string(m.Name()) p.string(m.Name())
if m.Name() != "_" && !ast.IsExported(m.Name()) { if m.Name() != "_" && !token.IsExported(m.Name()) {
p.pkg(m.Pkg(), false) p.pkg(m.Pkg(), false)
} }
@@ -501,7 +500,7 @@ func (p *exporter) fieldName(f *types.Var) {
// 3) field name doesn't match base type name (alias name) // 3) field name doesn't match base type name (alias name)
bname := basetypeName(f.Type()) bname := basetypeName(f.Type())
if name == bname { if name == bname {
if ast.IsExported(name) { if token.IsExported(name) {
name = "" // 1) we don't need to know the field name or package name = "" // 1) we don't need to know the field name or package
} else { } else {
name = "?" // 2) use unexported name "?" to force package export name = "?" // 2) use unexported name "?" to force package export
@@ -514,7 +513,7 @@ func (p *exporter) fieldName(f *types.Var) {
} }
p.string(name) p.string(name)
if name != "" && !ast.IsExported(name) { if name != "" && !token.IsExported(name) {
p.pkg(f.Pkg(), false) p.pkg(f.Pkg(), false)
} }
} }
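
Editor's note, not part of the diff: replacing ast.IsExported with token.IsExported drops the go/ast dependency while keeping the same semantics. A one-line demonstration:

package main

import (
	"fmt"
	"go/token"
)

func main() {
	fmt.Println(token.IsExported("Foo"), token.IsExported("foo")) // true false
}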

View File

@@ -0,0 +1,265 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go.
// Package gcimporter provides various functions for reading
// gc-generated object files that can be used to implement the
// Importer interface defined by the Go 1.5 standard library package.
package gcimporter // import "golang.org/x/tools/internal/gcimporter"
import (
"bufio"
"bytes"
"fmt"
"go/build"
"go/token"
"go/types"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
)
const (
// Enable debug during development: it adds some additional checks, and
// prevents errors from being recovered.
debug = false
// If trace is set, debugging output is printed to std out.
trace = false
)
var exportMap sync.Map // package dir → func() (string, bool)
// lookupGorootExport returns the location of the export data
// (normally found in the build cache, but located in GOROOT/pkg
// in prior Go releases) for the package located in pkgDir.
//
// (We use the package's directory instead of its import path
// mainly to simplify handling of the packages in src/vendor
// and cmd/vendor.)
func lookupGorootExport(pkgDir string) (string, bool) {
f, ok := exportMap.Load(pkgDir)
if !ok {
var (
listOnce sync.Once
exportPath string
)
f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
listOnce.Do(func() {
cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir)
cmd.Dir = build.Default.GOROOT
var output []byte
output, err := cmd.Output()
if err != nil {
return
}
exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
if len(exports) != 1 {
return
}
exportPath = exports[0]
})
return exportPath, exportPath != ""
})
}
return f.(func() (string, bool))()
}
var pkgExts = [...]string{".a", ".o"}
// FindPkg returns the filename and unique package id for an import
// path based on package information provided by build.Import (using
// the build.Default build.Context). A relative srcDir is interpreted
// relative to the current working directory.
// If no file was found, an empty filename is returned.
func FindPkg(path, srcDir string) (filename, id string) {
if path == "" {
return
}
var noext string
switch {
default:
// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
// Don't require the source files to be present.
if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
srcDir = abs
}
bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
if bp.PkgObj == "" {
var ok bool
if bp.Goroot && bp.Dir != "" {
filename, ok = lookupGorootExport(bp.Dir)
}
if !ok {
id = path // make sure we have an id to print in error message
return
}
} else {
noext = strings.TrimSuffix(bp.PkgObj, ".a")
id = bp.ImportPath
}
case build.IsLocalImport(path):
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
noext = filepath.Join(srcDir, path)
id = noext
case filepath.IsAbs(path):
// for completeness only - go/build.Import
// does not support absolute imports
// "/x" -> "/x.ext", "/x"
noext = path
id = path
}
if false { // for debugging
if path != id {
fmt.Printf("%s -> %s\n", path, id)
}
}
if filename != "" {
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
return
}
}
// try extensions
for _, ext := range pkgExts {
filename = noext + ext
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
return
}
}
filename = "" // not found
return
}
// Import imports a gc-generated package given its import path and srcDir, adds
// the corresponding package object to the packages map, and returns the object.
// The packages map must contain all packages already imported.
func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
var rc io.ReadCloser
var filename, id string
if lookup != nil {
// With custom lookup specified, assume that caller has
// converted path to a canonical import path for use in the map.
if path == "unsafe" {
return types.Unsafe, nil
}
id = path
// No need to re-import if the package was imported completely before.
if pkg = packages[id]; pkg != nil && pkg.Complete() {
return
}
f, err := lookup(path)
if err != nil {
return nil, err
}
rc = f
} else {
filename, id = FindPkg(path, srcDir)
if filename == "" {
if path == "unsafe" {
return types.Unsafe, nil
}
return nil, fmt.Errorf("can't find import: %q", id)
}
// no need to re-import if the package was imported completely before
if pkg = packages[id]; pkg != nil && pkg.Complete() {
return
}
// open file
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
// add file name to error
err = fmt.Errorf("%s: %v", filename, err)
}
}()
rc = f
}
defer rc.Close()
var hdr string
var size int64
buf := bufio.NewReader(rc)
if hdr, size, err = FindExportData(buf); err != nil {
return
}
switch hdr {
case "$$B\n":
var data []byte
data, err = ioutil.ReadAll(buf)
if err != nil {
break
}
// TODO(gri): allow clients of go/importer to provide a FileSet.
// Or, define a new standard go/types/gcexportdata package.
fset := token.NewFileSet()
// The indexed export format starts with an 'i'; the older
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
if len(data) > 0 {
switch data[0] {
case 'i':
_, pkg, err := IImportData(fset, packages, data[1:], id)
return pkg, err
case 'v', 'c', 'd':
_, pkg, err := BImportData(fset, packages, data, id)
return pkg, err
case 'u':
_, pkg, err := UImportData(fset, packages, data[1:size], id)
return pkg, err
default:
l := len(data)
if l > 10 {
l = 10
}
return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
}
}
default:
err = fmt.Errorf("unknown export data header: %q", hdr)
}
return
}
func deref(typ types.Type) types.Type {
if p, _ := typ.(*types.Pointer); p != nil {
return p.Elem()
}
return typ
}
type byPath []*types.Package
func (a byPath) Len() int { return len(a) }
func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
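
Editor's note, not part of the diff: this vendored gcimporter is internal to x/tools, but the standard library exposes the same style of gc export-data import through go/importer. A hedged sketch of that public entry point; "fmt" is an arbitrary package and export data is assumed to be available (looked up via the build cache, much like lookupGorootExport above).

package main

import (
	"fmt"
	"go/importer"
	"go/token"
	"log"
)

func main() {
	fset := token.NewFileSet()
	imp := importer.ForCompiler(fset, "gc", nil)
	pkg, err := imp.Import("fmt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), pkg.Complete())
}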

View File

@@ -12,7 +12,6 @@ import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"go/ast"
"go/constant" "go/constant"
"go/token" "go/token"
"go/types" "go/types"
@@ -23,9 +22,45 @@ import (
"strconv" "strconv"
"strings" "strings"
"golang.org/x/tools/internal/tokeninternal"
"golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typeparams"
) )
// IExportShallow encodes "shallow" export data for the specified package.
//
// No promises are made about the encoding other than that it can be
// decoded by the same version of IIExportShallow. If you plan to save
// export data in the file system, be sure to include a cryptographic
// digest of the executable in the key to avoid version skew.
func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) {
// In principle this operation can only fail if out.Write fails,
// but that's impossible for bytes.Buffer---and as a matter of
// fact iexportCommon doesn't even check for I/O errors.
// TODO(adonovan): handle I/O errors properly.
// TODO(adonovan): use byte slices throughout, avoiding copying.
const bundle, shallow = false, true
var out bytes.Buffer
err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
return out.Bytes(), err
}
// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow
// in the same executable. This function cannot import data from
// cmd/compile or gcexportdata.Write.
func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) {
const bundle = false
pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert)
if err != nil {
return nil, err
}
return pkgs[0], nil
}
// InsertType is the type of a function that creates a types.TypeName
// object for a named type and inserts it into the scope of the
// specified Package.
type InsertType = func(pkg *types.Package, name string)
// Current bundled export format version. Increase with each format change. // Current bundled export format version. Increase with each format change.
// 0: initial implementation // 0: initial implementation
const bundleVersion = 0 const bundleVersion = 0
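
Editor's note, not part of the diff: the IExportShallow comment above recommends keying any persisted export data on a digest of the running executable, so data written by one build is never decoded by another. A hedged sketch of such a cache key; the key format and helper name are made up.

package main

import (
	"crypto/sha256"
	"fmt"
	"log"
	"os"
)

// cacheKey is a hypothetical helper: it prefixes a package path with a short
// digest of the current executable, so a rebuilt tool never reads stale data.
func cacheKey(pkgPath string) (string, error) {
	exe, err := os.Executable()
	if err != nil {
		return "", err
	}
	data, err := os.ReadFile(exe)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(data)
	return fmt.Sprintf("%x-%s", sum[:8], pkgPath), nil
}

func main() {
	key, err := cacheKey("example.com/pkg")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(key)
}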
@@ -36,15 +71,17 @@ const bundleVersion = 0
// The package path of the top-level package will not be recorded, // The package path of the top-level package will not be recorded,
// so that calls to IImportData can override with a provided package path. // so that calls to IImportData can override with a provided package path.
func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
return iexportCommon(out, fset, false, iexportVersion, []*types.Package{pkg}) const bundle, shallow = false, false
return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
} }
// IExportBundle writes an indexed export bundle for pkgs to out. // IExportBundle writes an indexed export bundle for pkgs to out.
func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
return iexportCommon(out, fset, true, iexportVersion, pkgs) const bundle, shallow = true, false
return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs)
} }
func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int, pkgs []*types.Package) (err error) { func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) {
if !debug { if !debug {
defer func() { defer func() {
if e := recover(); e != nil { if e := recover(); e != nil {
@@ -61,6 +98,7 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int,
p := iexporter{ p := iexporter{
fset: fset, fset: fset,
version: version, version: version,
shallow: shallow,
allPkgs: map[*types.Package]bool{}, allPkgs: map[*types.Package]bool{},
stringIndex: map[string]uint64{}, stringIndex: map[string]uint64{},
declIndex: map[types.Object]uint64{}, declIndex: map[types.Object]uint64{},
@@ -82,7 +120,7 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int,
for _, pkg := range pkgs { for _, pkg := range pkgs {
scope := pkg.Scope() scope := pkg.Scope()
for _, name := range scope.Names() { for _, name := range scope.Names() {
if ast.IsExported(name) { if token.IsExported(name) {
p.pushDecl(scope.Lookup(name)) p.pushDecl(scope.Lookup(name))
} }
} }
@@ -101,6 +139,17 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int,
p.doDecl(p.declTodo.popHead()) p.doDecl(p.declTodo.popHead())
} }
// Produce index of offset of each file record in files.
var files intWriter
var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i
if p.shallow {
fileOffset = make([]uint64, len(p.fileInfos))
for i, info := range p.fileInfos {
fileOffset[i] = uint64(files.Len())
p.encodeFile(&files, info.file, info.needed)
}
}
// Append indices to data0 section. // Append indices to data0 section.
dataLen := uint64(p.data0.Len()) dataLen := uint64(p.data0.Len())
w := p.newWriter() w := p.newWriter()
@@ -126,16 +175,75 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle bool, version int,
} }
hdr.uint64(uint64(p.version)) hdr.uint64(uint64(p.version))
hdr.uint64(uint64(p.strings.Len())) hdr.uint64(uint64(p.strings.Len()))
if p.shallow {
hdr.uint64(uint64(files.Len()))
hdr.uint64(uint64(len(fileOffset)))
for _, offset := range fileOffset {
hdr.uint64(offset)
}
}
hdr.uint64(dataLen) hdr.uint64(dataLen)
// Flush output. // Flush output.
io.Copy(out, &hdr) io.Copy(out, &hdr)
io.Copy(out, &p.strings) io.Copy(out, &p.strings)
if p.shallow {
io.Copy(out, &files)
}
io.Copy(out, &p.data0) io.Copy(out, &p.data0)
return nil return nil
} }
// encodeFile writes to w a representation of the file sufficient to
// faithfully restore position information about all needed offsets.
// Mutates the needed array.
func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) {
_ = needed[0] // precondition: needed is non-empty
w.uint64(p.stringOff(file.Name()))
size := uint64(file.Size())
w.uint64(size)
// Sort the set of needed offsets. Duplicates are harmless.
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
lines := tokeninternal.GetLines(file) // byte offset of each line start
w.uint64(uint64(len(lines)))
// Rather than record the entire array of line start offsets,
// we save only a sparse list of (index, offset) pairs for
// the start of each line that contains a needed position.
var sparse [][2]int // (index, offset) pairs
outer:
for i, lineStart := range lines {
lineEnd := size
if i < len(lines)-1 {
lineEnd = uint64(lines[i+1])
}
// Does this line contains a needed offset?
if needed[0] < lineEnd {
sparse = append(sparse, [2]int{i, lineStart})
for needed[0] < lineEnd {
needed = needed[1:]
if len(needed) == 0 {
break outer
}
}
}
}
// Delta-encode the columns.
w.uint64(uint64(len(sparse)))
var prev [2]int
for _, pair := range sparse {
w.uint64(uint64(pair[0] - prev[0]))
w.uint64(uint64(pair[1] - prev[1]))
prev = pair
}
}
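
Editor's note, not part of the diff: encodeFile above delta-encodes the sparse (line index, line offset) pairs so that long runs of untouched lines cost nothing. A tiny self-contained illustration of the same scheme with made-up offsets:

package main

import "fmt"

func main() {
	sparse := [][2]int{{0, 0}, {10, 240}, {11, 260}} // (line index, line start offset)

	// Encode: store each pair as the difference from the previous pair.
	var encoded []int
	prev := [2]int{}
	for _, pair := range sparse {
		encoded = append(encoded, pair[0]-prev[0], pair[1]-prev[1])
		prev = pair
	}
	fmt.Println(encoded) // [0 0 10 240 1 20]

	// Decode: running sums recover the original pairs.
	var decoded [][2]int
	cur := [2]int{}
	for i := 0; i < len(encoded); i += 2 {
		cur[0] += encoded[i]
		cur[1] += encoded[i+1]
		decoded = append(decoded, cur)
	}
	fmt.Println(decoded) // [[0 0] [10 240] [11 260]]
}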
// writeIndex writes out an object index. mainIndex indicates whether // writeIndex writes out an object index. mainIndex indicates whether
// we're writing out the main index, which is also read by // we're writing out the main index, which is also read by
// non-compiler tools and includes a complete package description // non-compiler tools and includes a complete package description
@@ -205,7 +313,8 @@ type iexporter struct {
out *bytes.Buffer out *bytes.Buffer
version int version int
localpkg *types.Package shallow bool // don't put types from other packages in the index
localpkg *types.Package // (nil in bundle mode)
// allPkgs tracks all packages that have been referenced by // allPkgs tracks all packages that have been referenced by
// the export data, so we can ensure to include them in the // the export data, so we can ensure to include them in the
@@ -217,6 +326,12 @@ type iexporter struct {
strings intWriter strings intWriter
stringIndex map[string]uint64 stringIndex map[string]uint64
// In shallow mode, object positions are encoded as (file, offset).
// Each file is recorded as a line-number table.
// Only the lines of needed positions are saved faithfully.
fileInfo map[*token.File]uint64 // value is index in fileInfos
fileInfos []*filePositions
data0 intWriter data0 intWriter
declIndex map[types.Object]uint64 declIndex map[types.Object]uint64
tparamNames map[types.Object]string // typeparam->exported name tparamNames map[types.Object]string // typeparam->exported name
@@ -225,6 +340,11 @@ type iexporter struct {
indent int // for tracing support indent int // for tracing support
} }
type filePositions struct {
file *token.File
needed []uint64 // unordered list of needed file offsets
}
func (p *iexporter) trace(format string, args ...interface{}) { func (p *iexporter) trace(format string, args ...interface{}) {
if !trace { if !trace {
// Call sites should also be guarded, but having this check here allows // Call sites should also be guarded, but having this check here allows
@@ -248,6 +368,25 @@ func (p *iexporter) stringOff(s string) uint64 {
return off return off
} }
// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it.
func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) {
index, ok := p.fileInfo[file]
if !ok {
index = uint64(len(p.fileInfo))
p.fileInfos = append(p.fileInfos, &filePositions{file: file})
if p.fileInfo == nil {
p.fileInfo = make(map[*token.File]uint64)
}
p.fileInfo[file] = index
}
// Record each needed offset.
info := p.fileInfos[index]
offset := uint64(file.Offset(pos))
info.needed = append(info.needed, offset)
return index, offset
}
// pushDecl adds n to the declaration work queue, if not already present. // pushDecl adds n to the declaration work queue, if not already present.
func (p *iexporter) pushDecl(obj types.Object) { func (p *iexporter) pushDecl(obj types.Object) {
// Package unsafe is known to the compiler and predeclared. // Package unsafe is known to the compiler and predeclared.
@@ -256,6 +395,11 @@ func (p *iexporter) pushDecl(obj types.Object) {
panic("cannot export package unsafe") panic("cannot export package unsafe")
} }
// Shallow export data: don't index decls from other packages.
if p.shallow && obj.Pkg() != p.localpkg {
return
}
if _, ok := p.declIndex[obj]; ok { if _, ok := p.declIndex[obj]; ok {
return return
} }
@@ -303,7 +447,13 @@ func (p *iexporter) doDecl(obj types.Object) {
case *types.Func: case *types.Func:
sig, _ := obj.Type().(*types.Signature) sig, _ := obj.Type().(*types.Signature)
if sig.Recv() != nil { if sig.Recv() != nil {
panic(internalErrorf("unexpected method: %v", sig)) // We shouldn't see methods in the package scope,
// but the type checker may repair "func () F() {}"
// to "func (Invalid) F()" and then treat it like "func F()",
// so allow that. See golang/go#57729.
if sig.Recv().Type() != types.Typ[types.Invalid] {
panic(internalErrorf("unexpected method: %v", sig))
}
} }
// Function. // Function.
@@ -415,13 +565,30 @@ func (w *exportWriter) tag(tag byte) {
} }
func (w *exportWriter) pos(pos token.Pos) { func (w *exportWriter) pos(pos token.Pos) {
if w.p.version >= iexportVersionPosCol { if w.p.shallow {
w.posV2(pos)
} else if w.p.version >= iexportVersionPosCol {
w.posV1(pos) w.posV1(pos)
} else { } else {
w.posV0(pos) w.posV0(pos)
} }
} }
// posV2 encoding (used only in shallow mode) records positions as
// (file, offset), where file is the index in the token.File table
// (which records the file name and newline offsets) and offset is a
// byte offset. It effectively ignores //line directives.
func (w *exportWriter) posV2(pos token.Pos) {
if pos == token.NoPos {
w.uint64(0)
return
}
file := w.p.fset.File(pos) // fset must be non-nil
index, offset := w.p.fileIndexAndOffset(file, pos)
w.uint64(1 + index)
w.uint64(offset)
}
func (w *exportWriter) posV1(pos token.Pos) { func (w *exportWriter) posV1(pos token.Pos) {
if w.p.fset == nil { if w.p.fset == nil {
w.int64(0) w.int64(0)
@@ -497,7 +664,7 @@ func (w *exportWriter) pkg(pkg *types.Package) {
w.string(w.exportPath(pkg)) w.string(w.exportPath(pkg))
} }
func (w *exportWriter) qualifiedIdent(obj types.Object) { func (w *exportWriter) qualifiedType(obj *types.TypeName) {
name := w.p.exportName(obj) name := w.p.exportName(obj)
// Ensure any referenced declarations are written out too. // Ensure any referenced declarations are written out too.
@@ -556,11 +723,11 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
return return
} }
w.startType(definedType) w.startType(definedType)
w.qualifiedIdent(t.Obj()) w.qualifiedType(t.Obj())
case *typeparams.TypeParam: case *typeparams.TypeParam:
w.startType(typeParamType) w.startType(typeParamType)
w.qualifiedIdent(t.Obj()) w.qualifiedType(t.Obj())
case *types.Pointer: case *types.Pointer:
w.startType(pointerType) w.startType(pointerType)
@@ -602,14 +769,17 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
case *types.Struct: case *types.Struct:
w.startType(structType) w.startType(structType)
w.setPkg(pkg, true)
n := t.NumFields() n := t.NumFields()
if n > 0 {
w.setPkg(t.Field(0).Pkg(), true) // qualifying package for field objects
} else {
w.setPkg(pkg, true)
}
w.uint64(uint64(n)) w.uint64(uint64(n))
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
f := t.Field(i) f := t.Field(i)
w.pos(f.Pos()) w.pos(f.Pos())
w.string(f.Name()) w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg
w.typ(f.Type(), pkg) w.typ(f.Type(), pkg)
w.bool(f.Anonymous()) w.bool(f.Anonymous())
w.string(t.Tag(i)) // note (or tag) w.string(t.Tag(i)) // note (or tag)

View File

@@ -51,6 +51,8 @@ const (
iexportVersionPosCol = 1 iexportVersionPosCol = 1
iexportVersionGo1_18 = 2 iexportVersionGo1_18 = 2
iexportVersionGenerics = 2 iexportVersionGenerics = 2
iexportVersionCurrent = 2
) )
type ident struct { type ident struct {
@@ -83,7 +85,7 @@ const (
// If the export data version is not recognized or the format is otherwise // If the export data version is not recognized or the format is otherwise
// compromised, an error is returned. // compromised, an error is returned.
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
pkgs, err := iimportCommon(fset, imports, data, false, path) pkgs, err := iimportCommon(fset, imports, data, false, path, nil)
if err != nil { if err != nil {
return 0, nil, err return 0, nil, err
} }
@@ -92,11 +94,11 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
// IImportBundle imports a set of packages from the serialized package bundle. // IImportBundle imports a set of packages from the serialized package bundle.
func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
return iimportCommon(fset, imports, data, true, "") return iimportCommon(fset, imports, data, true, "", nil)
} }
func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string) (pkgs []*types.Package, err error) { func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) {
const currentVersion = 1 const currentVersion = iexportVersionCurrent
version := int64(-1) version := int64(-1)
if !debug { if !debug {
defer func() { defer func() {
@@ -135,19 +137,34 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
} }
sLen := int64(r.uint64()) sLen := int64(r.uint64())
var fLen int64
var fileOffset []uint64
if insert != nil {
// Shallow mode uses a different position encoding.
fLen = int64(r.uint64())
fileOffset = make([]uint64, r.uint64())
for i := range fileOffset {
fileOffset[i] = r.uint64()
}
}
dLen := int64(r.uint64()) dLen := int64(r.uint64())
whence, _ := r.Seek(0, io.SeekCurrent) whence, _ := r.Seek(0, io.SeekCurrent)
stringData := data[whence : whence+sLen] stringData := data[whence : whence+sLen]
declData := data[whence+sLen : whence+sLen+dLen] fileData := data[whence+sLen : whence+sLen+fLen]
r.Seek(sLen+dLen, io.SeekCurrent) declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen]
r.Seek(sLen+fLen+dLen, io.SeekCurrent)
p := iimporter{ p := iimporter{
version: int(version), version: int(version),
ipath: path, ipath: path,
insert: insert,
stringData: stringData, stringData: stringData,
stringCache: make(map[uint64]string), stringCache: make(map[uint64]string),
fileOffset: fileOffset,
fileData: fileData,
fileCache: make([]*token.File, len(fileOffset)),
pkgCache: make(map[uint64]*types.Package), pkgCache: make(map[uint64]*types.Package),
declData: declData, declData: declData,
@@ -185,11 +202,18 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
} else if pkg.Name() != pkgName { } else if pkg.Name() != pkgName {
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
} }
if i == 0 && !bundle {
p.localpkg = pkg
}
p.pkgCache[pkgPathOff] = pkg p.pkgCache[pkgPathOff] = pkg
// Read index for package.
nameIndex := make(map[string]uint64) nameIndex := make(map[string]uint64)
for nSyms := r.uint64(); nSyms > 0; nSyms-- { nSyms := r.uint64()
// In shallow mode we don't expect an index for other packages.
assert(nSyms == 0 || p.localpkg == pkg || p.insert == nil)
for ; nSyms > 0; nSyms-- {
name := p.stringAt(r.uint64()) name := p.stringAt(r.uint64())
nameIndex[name] = r.uint64() nameIndex[name] = r.uint64()
} }
@@ -265,8 +289,14 @@ type iimporter struct {
version int version int
ipath string ipath string
localpkg *types.Package
insert func(pkg *types.Package, name string) // "shallow" mode only
stringData []byte stringData []byte
stringCache map[uint64]string stringCache map[uint64]string
fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i
fileData []byte
fileCache []*token.File // memoized decoding of file encoded as i
pkgCache map[uint64]*types.Package pkgCache map[uint64]*types.Package
declData []byte declData []byte
@@ -308,6 +338,13 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) {
off, ok := p.pkgIndex[pkg][name] off, ok := p.pkgIndex[pkg][name]
if !ok { if !ok {
// In "shallow" mode, call back to the application to
// find the object and insert it into the package scope.
if p.insert != nil {
assert(pkg != p.localpkg)
p.insert(pkg, name) // "can't fail"
return
}
errorf("%v.%v not in index", pkg, name) errorf("%v.%v not in index", pkg, name)
} }
@@ -332,6 +369,55 @@ func (p *iimporter) stringAt(off uint64) string {
return s return s
} }
func (p *iimporter) fileAt(index uint64) *token.File {
file := p.fileCache[index]
if file == nil {
off := p.fileOffset[index]
file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath})
p.fileCache[index] = file
}
return file
}
func (p *iimporter) decodeFile(rd intReader) *token.File {
filename := p.stringAt(rd.uint64())
size := int(rd.uint64())
file := p.fake.fset.AddFile(filename, -1, size)
// SetLines requires a nondecreasing sequence.
// Because it is common for clients to derive the interval
// [start, start+len(name)] from a start position, and we
// want to ensure that the end offset is on the same line,
// we fill in the gaps of the sparse encoding with values
// that strictly increase by the largest possible amount.
// This allows us to avoid having to record the actual end
// offset of each needed line.
lines := make([]int, int(rd.uint64()))
var index, offset int
for i, n := 0, int(rd.uint64()); i < n; i++ {
index += int(rd.uint64())
offset += int(rd.uint64())
lines[index] = offset
// Ensure monotonicity between points.
for j := index - 1; j > 0 && lines[j] == 0; j-- {
lines[j] = lines[j+1] - 1
}
}
// Ensure monotonicity after last point.
for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- {
size--
lines[j] = size
}
if !file.SetLines(lines) {
errorf("SetLines failed: %d", lines) // can't happen
}
return file
}
func (p *iimporter) pkgAt(off uint64) *types.Package { func (p *iimporter) pkgAt(off uint64) *types.Package {
if pkg, ok := p.pkgCache[off]; ok { if pkg, ok := p.pkgCache[off]; ok {
return pkg return pkg
@@ -625,6 +711,9 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) {
} }
func (r *importReader) pos() token.Pos { func (r *importReader) pos() token.Pos {
if r.p.insert != nil { // shallow mode
return r.posv2()
}
if r.p.version >= iexportVersionPosCol { if r.p.version >= iexportVersionPosCol {
r.posv1() r.posv1()
} else { } else {
@@ -661,6 +750,15 @@ func (r *importReader) posv1() {
} }
} }
func (r *importReader) posv2() token.Pos {
file := r.uint64()
if file == 0 {
return token.NoPos
}
tf := r.p.fileAt(file - 1)
return tf.Pos(int(r.uint64()))
}
func (r *importReader) typ() types.Type { func (r *importReader) typ() types.Type {
return r.p.typAt(r.uint64(), nil) return r.p.typAt(r.uint64(), nil)
} }

View File

@@ -21,3 +21,17 @@ func additionalPredeclared() []types.Type {
types.Universe.Lookup("any").Type(), types.Universe.Lookup("any").Type(),
} }
} }
// See cmd/compile/internal/types.SplitVargenSuffix.
func splitVargenSuffix(name string) (base, suffix string) {
i := len(name)
for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
i--
}
const dot = "·"
if i >= len(dot) && name[i-len(dot):i] == dot {
i -= len(dot)
return name[:i], name[i:]
}
return name, ""
}
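// Illustrative behaviour of splitVargenSuffix (assumed inputs, not part of
// the upstream change):
func exampleSplitVargenSuffix() {
	_, _ = splitVargenSuffix("T·3")    // base "T", suffix "·3"
	_, _ = splitVargenSuffix("Reader") // base "Reader", suffix ""
}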


@@ -14,7 +14,7 @@ import (
"go/types" "go/types"
"strings" "strings"
"golang.org/x/tools/go/internal/pkgbits" "golang.org/x/tools/internal/pkgbits"
) )
// A pkgReader holds the shared state for reading a unified IR package // A pkgReader holds the shared state for reading a unified IR package
@@ -36,6 +36,12 @@ type pkgReader struct {
// laterFns holds functions that need to be invoked at the end of // laterFns holds functions that need to be invoked at the end of
// import reading. // import reading.
laterFns []func() laterFns []func()
// laterFors is used in case of 'type A B' to ensure that B is processed before A.
laterFors map[types.Type]int
// ifaces holds a list of constructed Interfaces, which need to have
// Complete called after importing is done.
ifaces []*types.Interface
} }
// later adds a function to be invoked at the end of import reading. // later adds a function to be invoked at the end of import reading.
@@ -63,6 +69,15 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []
return return
} }
// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing.
func (pr *pkgReader) laterFor(t types.Type, fn func()) {
if pr.laterFors == nil {
pr.laterFors = make(map[types.Type]int)
}
pr.laterFors[t] = len(pr.laterFns)
pr.laterFns = append(pr.laterFns, fn)
}
// readUnifiedPackage reads a package description from the given // readUnifiedPackage reads a package description from the given
// unified IR export data decoder. // unified IR export data decoder.
func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
@@ -102,6 +117,10 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st
fn() fn()
} }
for _, iface := range pr.ifaces {
iface.Complete()
}
pkg.MarkComplete() pkg.MarkComplete()
return pkg return pkg
} }
@@ -139,6 +158,17 @@ func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pk
} }
} }
func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
return &reader{
Decoder: pr.TempDecoder(k, idx, marker),
p: pr,
}
}
func (pr *pkgReader) retireReader(r *reader) {
pr.RetireDecoder(&r.Decoder)
}
// @@@ Positions // @@@ Positions
func (r *reader) pos() token.Pos { func (r *reader) pos() token.Pos {
@@ -163,26 +193,29 @@ func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string {
return b return b
} }
r := pr.newReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) var filename string
{
r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
// Within types2, position bases have a lot more details (e.g., // Within types2, position bases have a lot more details (e.g.,
// keeping track of where //line directives appeared exactly). // keeping track of where //line directives appeared exactly).
// //
// For go/types, we just track the file name. // For go/types, we just track the file name.
filename := r.String() filename = r.String()
if r.Bool() { // file base if r.Bool() { // file base
// Was: "b = token.NewTrimmedFileBase(filename, true)" // Was: "b = token.NewTrimmedFileBase(filename, true)"
} else { // line base } else { // line base
pos := r.pos() pos := r.pos()
line := r.Uint() line := r.Uint()
col := r.Uint() col := r.Uint()
// Was: "b = token.NewLineBase(pos, filename, true, line, col)" // Was: "b = token.NewLineBase(pos, filename, true, line, col)"
_, _, _ = pos, line, col _, _, _ = pos, line, col
}
pr.retireReader(r)
} }
b := filename b := filename
pr.posBases[idx] = b pr.posBases[idx] = b
return b return b
@@ -231,11 +264,35 @@ func (r *reader) doPkg() *types.Package {
for i := range imports { for i := range imports {
imports[i] = r.pkg() imports[i] = r.pkg()
} }
pkg.SetImports(imports) pkg.SetImports(flattenImports(imports))
return pkg return pkg
} }
// flattenImports returns the transitive closure of all imported
// packages rooted from pkgs.
func flattenImports(pkgs []*types.Package) []*types.Package {
var res []*types.Package
seen := make(map[*types.Package]struct{})
for _, pkg := range pkgs {
if _, ok := seen[pkg]; ok {
continue
}
seen[pkg] = struct{}{}
res = append(res, pkg)
// pkg.Imports() is already flattened.
for _, pkg := range pkg.Imports() {
if _, ok := seen[pkg]; ok {
continue
}
seen[pkg] = struct{}{}
res = append(res, pkg)
}
}
return res
}
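// Illustrative sketch of flattenImports (hypothetical packages a, b, c; not
// part of the upstream change), relying on the already-flattened invariant
// noted above:
func exampleFlattenImports() []*types.Package {
	c := types.NewPackage("example.com/c", "c")
	b := types.NewPackage("example.com/b", "b")
	b.SetImports([]*types.Package{c})
	a := types.NewPackage("example.com/a", "a")
	a.SetImports([]*types.Package{b, c})       // already transitively closed
	return flattenImports([]*types.Package{a}) // [a b c]
}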
// @@@ Types // @@@ Types
func (r *reader) typ() types.Type { func (r *reader) typ() types.Type {
@@ -264,12 +321,15 @@ func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type {
return typ return typ
} }
r := pr.newReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) var typ types.Type
r.dict = dict {
r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
typ := r.doTyp() r.dict = dict
assert(typ != nil)
typ = r.doTyp()
assert(typ != nil)
pr.retireReader(r)
}
// See comment in pkgReader.typIdx explaining how this happens. // See comment in pkgReader.typIdx explaining how this happens.
if prev := *where; prev != nil { if prev := *where; prev != nil {
return prev return prev
@@ -372,6 +432,16 @@ func (r *reader) interfaceType() *types.Interface {
if implicit { if implicit {
iface.MarkImplicit() iface.MarkImplicit()
} }
// We need to call iface.Complete(), but if there are any embedded
// defined types, then we may not have set their underlying
// interface type yet. So we need to defer calling Complete until
// after we've called SetUnderlying everywhere.
//
// TODO(mdempsky): After CL 424876 lands, it should be safe to call
// iface.Complete() immediately.
r.p.ifaces = append(r.p.ifaces, iface)
return iface return iface
} }
@@ -425,18 +495,30 @@ func (r *reader) obj() (types.Object, []types.Type) {
} }
func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
objPkg, objName := rname.qualifiedIdent() var objPkg *types.Package
assert(objName != "") var objName string
var tag pkgbits.CodeObj
{
rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) objPkg, objName = rname.qualifiedIdent()
assert(objName != "")
tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
pr.retireReader(rname)
}
if tag == pkgbits.ObjStub { if tag == pkgbits.ObjStub {
assert(objPkg == nil || objPkg == types.Unsafe) assert(objPkg == nil || objPkg == types.Unsafe)
return objPkg, objName return objPkg, objName
} }
// Ignore local types promoted to global scope (#55110).
if _, suffix := splitVargenSuffix(objName); suffix != "" {
return objPkg, objName
}
if objPkg.Scope().Lookup(objName) == nil { if objPkg.Scope().Lookup(objName) == nil {
dict := pr.objDictIdx(idx) dict := pr.objDictIdx(idx)
@@ -477,15 +559,56 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
named.SetTypeParams(r.typeParamNames()) named.SetTypeParams(r.typeParamNames())
// TODO(mdempsky): Rewrite receiver types to underlying is an setUnderlying := func(underlying types.Type) {
// Interface? The go/types importer does this (I think because // If the underlying type is an interface, we need to
// unit tests expected that), but cmd/compile doesn't care // duplicate its methods so we can replace the receiver
// about it, so maybe we can avoid worrying about that here. // parameter's type (#49906).
rhs := r.typ() if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
r.p.later(func() { methods := make([]*types.Func, iface.NumExplicitMethods())
underlying := rhs.Underlying() for i := range methods {
fn := iface.ExplicitMethod(i)
sig := fn.Type().(*types.Signature)
recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
}
embeds := make([]types.Type, iface.NumEmbeddeds())
for i := range embeds {
embeds[i] = iface.EmbeddedType(i)
}
newIface := types.NewInterfaceType(methods, embeds)
r.p.ifaces = append(r.p.ifaces, newIface)
underlying = newIface
}
named.SetUnderlying(underlying) named.SetUnderlying(underlying)
}) }
// Since go.dev/cl/455279, we can assume rhs.Underlying() will
// always be non-nil. However, to temporarily support users of
// older snapshot releases, we continue to fall back to the old
// behavior for now.
//
// TODO(mdempsky): Remove fallback code and simplify after
// allowing time for snapshot users to upgrade.
rhs := r.typ()
if underlying := rhs.Underlying(); underlying != nil {
setUnderlying(underlying)
} else {
pk := r.p
pk.laterFor(named, func() {
// First be sure that the rhs is initialized, if it needs to be initialized.
delete(pk.laterFors, named) // prevent cycles
if i, ok := pk.laterFors[rhs]; ok {
f := pk.laterFns[i]
pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
f() // initialize RHS
}
setUnderlying(rhs.Underlying())
})
}
for i, n := 0, r.Len(); i < n; i++ { for i, n := 0, r.Len(); i < n; i++ {
named.AddMethod(r.method()) named.AddMethod(r.method())
@@ -502,25 +625,28 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
} }
func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
var dict readerDict var dict readerDict
if implicits := r.Len(); implicits != 0 { {
errorf("unexpected object with %v implicit type parameter(s)", implicits) r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
} if implicits := r.Len(); implicits != 0 {
errorf("unexpected object with %v implicit type parameter(s)", implicits)
}
dict.bounds = make([]typeInfo, r.Len()) dict.bounds = make([]typeInfo, r.Len())
for i := range dict.bounds { for i := range dict.bounds {
dict.bounds[i] = r.typInfo() dict.bounds[i] = r.typInfo()
} }
dict.derived = make([]derivedInfo, r.Len()) dict.derived = make([]derivedInfo, r.Len())
dict.derivedTypes = make([]types.Type, len(dict.derived)) dict.derivedTypes = make([]types.Type, len(dict.derived))
for i := range dict.derived { for i := range dict.derived {
dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
} }
pr.retireReader(r)
}
// function references follow, but reader doesn't need those // function references follow, but reader doesn't need those
return &dict return &dict


@@ -10,8 +10,10 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"log"
"os" "os"
"regexp" "regexp"
"runtime"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@@ -232,6 +234,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
return runCmdContext(ctx, cmd) return runCmdContext(ctx, cmd)
} }
// DebugHangingGoCommands may be set by tests to enable additional
// instrumentation (including panics) for debugging hanging Go commands.
//
// See golang/go#54461 for details.
var DebugHangingGoCommands = false
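// Illustrative test hook (hypothetical test file, not part of the upstream
// change):
//
//	func TestMain(m *testing.M) {
//		gocommand.DebugHangingGoCommands = true // dump diagnostics and panic if a go command hangs
//		os.Exit(m.Run())
//	}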
// runCmdContext is like exec.CommandContext except it sends os.Interrupt // runCmdContext is like exec.CommandContext except it sends os.Interrupt
// before os.Kill. // before os.Kill.
func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
@@ -243,11 +251,24 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
resChan <- cmd.Wait() resChan <- cmd.Wait()
}() }()
select { // If we're interested in debugging hanging Go commands, stop waiting after a
case err := <-resChan: // minute and panic with interesting information.
return err if DebugHangingGoCommands {
case <-ctx.Done(): select {
case err := <-resChan:
return err
case <-time.After(1 * time.Minute):
HandleHangingGoCommand(cmd.Process)
case <-ctx.Done():
}
} else {
select {
case err := <-resChan:
return err
case <-ctx.Done():
}
} }
// Cancelled. Interrupt and see if it ends voluntarily. // Cancelled. Interrupt and see if it ends voluntarily.
cmd.Process.Signal(os.Interrupt) cmd.Process.Signal(os.Interrupt)
select { select {
@@ -255,11 +276,63 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
return err return err
case <-time.After(time.Second): case <-time.After(time.Second):
} }
// Didn't shut down in response to interrupt. Kill it hard. // Didn't shut down in response to interrupt. Kill it hard.
cmd.Process.Kill() // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
// on certain platforms, such as unix.
if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands {
// Don't panic here as this reliably fails on windows with EINVAL.
log.Printf("error killing the Go command: %v", err)
}
// See above: don't wait indefinitely if we're debugging hanging Go commands.
if DebugHangingGoCommands {
select {
case err := <-resChan:
return err
case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill
HandleHangingGoCommand(cmd.Process)
}
}
return <-resChan return <-resChan
} }
func HandleHangingGoCommand(proc *os.Process) {
switch runtime.GOOS {
case "linux", "darwin", "freebsd", "netbsd":
fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND
The gopls test runner has detected a hanging go command. In order to debug
this, the output of ps and lsof/fstat is printed below.
See golang/go#54461 for more details.`)
fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:")
fmt.Fprintln(os.Stderr, "-------------------------")
psCmd := exec.Command("ps", "axo", "ppid,pid,command")
psCmd.Stdout = os.Stderr
psCmd.Stderr = os.Stderr
if err := psCmd.Run(); err != nil {
panic(fmt.Sprintf("running ps: %v", err))
}
listFiles := "lsof"
if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" {
listFiles = "fstat"
}
fmt.Fprintln(os.Stderr, "\n"+listFiles+":")
fmt.Fprintln(os.Stderr, "-----")
listFilesCmd := exec.Command(listFiles)
listFilesCmd.Stdout = os.Stderr
listFilesCmd.Stderr = os.Stderr
if err := listFilesCmd.Run(); err != nil {
panic(fmt.Sprintf("running %s: %v", listFiles, err))
}
}
panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid))
}
func cmdDebugStr(cmd *exec.Cmd) string { func cmdDebugStr(cmd *exec.Cmd) string {
env := make(map[string]string) env := make(map[string]string)
for _, kv := range cmd.Env { for _, kv := range cmd.Env {


@@ -7,11 +7,19 @@ package gocommand
import ( import (
"context" "context"
"fmt" "fmt"
"regexp"
"strings" "strings"
) )
// GoVersion checks the go version by running "go list" with modules off. // GoVersion reports the minor version number of the highest release
// It returns the X in Go 1.X. // tag built into the go command on the PATH.
//
// Note that this may be higher than the version of the go tool used
// to build this application, and thus the versions of the standard
// go/{scanner,parser,ast,types} packages that are linked into it.
// In that case, callers should either downgrade to the version of
// go used to build the application, or report an error that the
// application is too old to use the go command on the PATH.
func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
inv.Verb = "list" inv.Verb = "list"
inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`}
@@ -38,7 +46,7 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
if len(stdout) < 3 { if len(stdout) < 3 {
return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout)
} }
// Split up "[go1.1 go1.15]" // Split up "[go1.1 go1.15]" and return highest go1.X value.
tags := strings.Fields(stdout[1 : len(stdout)-2]) tags := strings.Fields(stdout[1 : len(stdout)-2])
for i := len(tags) - 1; i >= 0; i-- { for i := len(tags) - 1; i >= 0; i-- {
var version int var version int
@@ -49,3 +57,25 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
} }
return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags)
} }
// GoVersionOutput returns the complete output of the go version command.
func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) {
inv.Verb = "version"
goVersion, err := r.Run(ctx, inv)
if err != nil {
return "", err
}
return goVersion.String(), nil
}
// ParseGoVersionOutput extracts the Go version string
// from the output of the "go version" command.
// Given an unrecognized form, it returns an empty string.
func ParseGoVersionOutput(data string) string {
re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
m := re.FindStringSubmatch(data)
if len(m) != 2 {
return "" // unrecognized version
}
return m[1]
}
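// Illustrative usage of ParseGoVersionOutput (assumed version banners, not
// part of the upstream change):
func exampleParseGoVersionOutput() {
	_ = ParseGoVersionOutput("go version go1.20.1 linux/amd64")            // "go1.20.1"
	_ = ParseGoVersionOutput("go version devel go1.21-abc123 linux/amd64") // "devel go1.21-abc123"
	_ = ParseGoVersionOutput("unrecognized banner")                        // ""
}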


@@ -6,9 +6,11 @@ package pkgbits
import ( import (
"encoding/binary" "encoding/binary"
"errors"
"fmt" "fmt"
"go/constant" "go/constant"
"go/token" "go/token"
"io"
"math/big" "math/big"
"os" "os"
"runtime" "runtime"
@@ -51,6 +53,8 @@ type PkgDecoder struct {
// For example, section K's end positions start at elemEndsEnds[K-1] // For example, section K's end positions start at elemEndsEnds[K-1]
// (or 0, if K==0) and end at elemEndsEnds[K]. // (or 0, if K==0) and end at elemEndsEnds[K].
elemEndsEnds [numRelocs]uint32 elemEndsEnds [numRelocs]uint32
scratchRelocEnt []RelocEnt
} }
// PkgPath returns the package path for the package // PkgPath returns the package path for the package
@@ -94,7 +98,7 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
pos, err := r.Seek(0, os.SEEK_CUR) pos, err := r.Seek(0, io.SeekCurrent)
assert(err == nil) assert(err == nil)
pr.elemData = input[pos:] pr.elemData = input[pos:]
@@ -164,6 +168,21 @@ func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Deco
return r return r
} }
// TempDecoder returns a Decoder for the given (section, index) pair,
// and decodes the given SyncMarker from the element bitstream.
// If possible, the Decoder should be RetireDecoder'd when it is no longer
// needed; this avoids heap allocations.
func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
r := pr.TempDecoderRaw(k, idx)
r.Sync(marker)
return r
}
func (pr *PkgDecoder) RetireDecoder(d *Decoder) {
pr.scratchRelocEnt = d.Relocs
d.Relocs = nil
}
// NewDecoderRaw returns a Decoder for the given (section, index) pair. // NewDecoderRaw returns a Decoder for the given (section, index) pair.
// //
// Most callers should use NewDecoder instead. // Most callers should use NewDecoder instead.
@@ -187,6 +206,30 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
return r return r
} }
func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
r := Decoder{
common: pr,
k: k,
Idx: idx,
}
r.Data.Reset(pr.DataIdx(k, idx))
r.Sync(SyncRelocs)
l := r.Len()
if cap(pr.scratchRelocEnt) >= l {
r.Relocs = pr.scratchRelocEnt[:l]
pr.scratchRelocEnt = nil
} else {
r.Relocs = make([]RelocEnt, l)
}
for i := range r.Relocs {
r.Sync(SyncReloc)
r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
}
return r
}
// A Decoder provides methods for decoding an individual element's // A Decoder provides methods for decoding an individual element's
// bitstream data. // bitstream data.
type Decoder struct { type Decoder struct {
@@ -206,11 +249,39 @@ func (r *Decoder) checkErr(err error) {
} }
func (r *Decoder) rawUvarint() uint64 { func (r *Decoder) rawUvarint() uint64 {
x, err := binary.ReadUvarint(&r.Data) x, err := readUvarint(&r.Data)
r.checkErr(err) r.checkErr(err)
return x return x
} }
// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
// This avoids the interface conversion and thus has better escape properties,
// which flows up the stack.
func readUvarint(r *strings.Reader) (uint64, error) {
var x uint64
var s uint
for i := 0; i < binary.MaxVarintLen64; i++ {
b, err := r.ReadByte()
if err != nil {
if i > 0 && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return x, err
}
if b < 0x80 {
if i == binary.MaxVarintLen64-1 && b > 1 {
return x, overflow
}
return x | uint64(b)<<s, nil
}
x |= uint64(b&0x7f) << s
s += 7
}
return x, overflow
}
var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
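// Illustrative decoding sketch (assumed input bytes, not part of the upstream
// change): the uvarint encoding of 300 is 0xAC 0x02, so readUvarint returns
// 0x2C | 0x02<<7 == 300.
func exampleReadUvarint() (uint64, error) {
	return readUvarint(strings.NewReader("\xac\x02")) // 300, nil
}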
func (r *Decoder) rawVarint() int64 { func (r *Decoder) rawVarint() int64 {
ux := r.rawUvarint() ux := r.rawUvarint()
@@ -237,7 +308,7 @@ func (r *Decoder) Sync(mWant SyncMarker) {
return return
} }
pos, _ := r.Data.Seek(0, os.SEEK_CUR) // TODO(mdempsky): io.SeekCurrent after #44505 is resolved pos, _ := r.Data.Seek(0, io.SeekCurrent)
mHave := SyncMarker(r.rawUvarint()) mHave := SyncMarker(r.rawUvarint())
writerPCs := make([]int, r.rawUvarint()) writerPCs := make([]int, r.rawUvarint())
for i := range writerPCs { for i := range writerPCs {
@@ -302,7 +373,7 @@ func (r *Decoder) Int64() int64 {
return r.rawVarint() return r.rawVarint()
} }
// Int64 decodes and returns a uint64 value from the element bitstream. // Uint64 decodes and returns a uint64 value from the element bitstream.
func (r *Decoder) Uint64() uint64 { func (r *Decoder) Uint64() uint64 {
r.Sync(SyncUint64) r.Sync(SyncUint64)
return r.rawUvarint() return r.rawUvarint()
@@ -409,8 +480,12 @@ func (r *Decoder) bigFloat() *big.Float {
// PeekPkgPath returns the package path for the specified package // PeekPkgPath returns the package path for the specified package
// index. // index.
func (pr *PkgDecoder) PeekPkgPath(idx Index) string { func (pr *PkgDecoder) PeekPkgPath(idx Index) string {
r := pr.NewDecoder(RelocPkg, idx, SyncPkgDef) var path string
path := r.String() {
r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef)
path = r.String()
pr.RetireDecoder(&r)
}
if path == "" { if path == "" {
path = pr.pkgPath path = pr.pkgPath
} }
@@ -420,14 +495,23 @@ func (pr *PkgDecoder) PeekPkgPath(idx Index) string {
// PeekObj returns the package path, object name, and CodeObj for the // PeekObj returns the package path, object name, and CodeObj for the
// specified object index. // specified object index.
func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
r := pr.NewDecoder(RelocName, idx, SyncObject1) var ridx Index
r.Sync(SyncSym) var name string
r.Sync(SyncPkg) var rcode int
path := pr.PeekPkgPath(r.Reloc(RelocPkg)) {
name := r.String() r := pr.TempDecoder(RelocName, idx, SyncObject1)
r.Sync(SyncSym)
r.Sync(SyncPkg)
ridx = r.Reloc(RelocPkg)
name = r.String()
rcode = r.Code(SyncCodeObj)
pr.RetireDecoder(&r)
}
path := pr.PeekPkgPath(ridx)
assert(name != "") assert(name != "")
tag := CodeObj(r.Code(SyncCodeObj)) tag := CodeObj(rcode)
return path, name, tag return path, name, tag
} }


@@ -147,8 +147,9 @@ func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder {
type Encoder struct { type Encoder struct {
p *PkgEncoder p *PkgEncoder
Relocs []RelocEnt Relocs []RelocEnt
Data bytes.Buffer // accumulated element bitstream data RelocMap map[RelocEnt]uint32
Data bytes.Buffer // accumulated element bitstream data
encodingRelocHeader bool encodingRelocHeader bool
@@ -210,15 +211,18 @@ func (w *Encoder) rawVarint(x int64) {
} }
func (w *Encoder) rawReloc(r RelocKind, idx Index) int { func (w *Encoder) rawReloc(r RelocKind, idx Index) int {
// TODO(mdempsky): Use map for lookup; this takes quadratic time. e := RelocEnt{r, idx}
for i, rEnt := range w.Relocs { if w.RelocMap != nil {
if rEnt.Kind == r && rEnt.Idx == idx { if i, ok := w.RelocMap[e]; ok {
return i return int(i)
} }
} else {
w.RelocMap = make(map[RelocEnt]uint32)
} }
i := len(w.Relocs) i := len(w.Relocs)
w.Relocs = append(w.Relocs, RelocEnt{r, idx}) w.RelocMap[e] = uint32(i)
w.Relocs = append(w.Relocs, e)
return i return i
} }
@@ -289,7 +293,7 @@ func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
// Int encodes and writes an int value into the element bitstream. // Int encodes and writes an int value into the element bitstream.
func (w *Encoder) Int(x int) { w.Int64(int64(x)) } func (w *Encoder) Int(x int) { w.Int64(int64(x)) }
// Len encodes and writes a uint value into the element bitstream. // Uint encodes and writes a uint value into the element bitstream.
func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }
// Reloc encodes and writes a relocation for the given (section, // Reloc encodes and writes a relocation for the given (section,


@@ -5,11 +5,11 @@
package pkgbits package pkgbits
// A RelocKind indicates a particular section within a unified IR export. // A RelocKind indicates a particular section within a unified IR export.
type RelocKind int type RelocKind int32
// An Index represents a bitstream element index within a particular // An Index represents a bitstream element index within a particular
// section. // section.
type Index int type Index int32
// A relocEnt (relocation entry) is an entry in an element's local // A relocEnt (relocation entry) is an entry in an element's local
// reference table. // reference table.


@@ -0,0 +1,59 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tokeninternal provides access to some internal features of the token
// package.
package tokeninternal
import (
"go/token"
"sync"
"unsafe"
)
// GetLines returns the table of line-start offsets from a token.File.
func GetLines(file *token.File) []int {
// token.File has a Lines method on Go 1.21 and later.
if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
return file.Lines()
}
// This declaration must match that of token.File.
// This creates a risk of dependency skew.
// For now we check that the size of the two
// declarations is the same, on the (fragile) assumption
// that future changes would add fields.
type tokenFile119 struct {
_ string
_ int
_ int
mu sync.Mutex // we're not complete monsters
lines []int
_ []struct{}
}
type tokenFile118 struct {
_ *token.FileSet // deleted in go1.19
tokenFile119
}
type uP = unsafe.Pointer
switch unsafe.Sizeof(*file) {
case unsafe.Sizeof(tokenFile118{}):
var ptr *tokenFile118
*(*uP)(uP(&ptr)) = uP(file)
ptr.mu.Lock()
defer ptr.mu.Unlock()
return ptr.lines
case unsafe.Sizeof(tokenFile119{}):
var ptr *tokenFile119
*(*uP)(uP(&ptr)) = uP(file)
ptr.mu.Lock()
defer ptr.mu.Unlock()
return ptr.lines
default:
panic("unexpected token.File size")
}
}
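// Illustrative usage sketch (hypothetical file name and line offsets, not
// part of the upstream change):
func exampleGetLines() []int {
	fset := token.NewFileSet()
	f := fset.AddFile("example.go", -1, 20) // a hypothetical 20-byte file
	f.SetLines([]int{0, 5, 12})             // three lines starting at these offsets
	return GetLines(f)                      // []int{0, 5, 12}
}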


@@ -30,6 +30,12 @@ type ErrorCode int
// convention that "bad" implies a problem with syntax, and "invalid" implies a // convention that "bad" implies a problem with syntax, and "invalid" implies a
// problem with types. // problem with types.
const (
// InvalidSyntaxTree occurs if an invalid syntax tree is provided
// to the type checker. It should never happen.
InvalidSyntaxTree ErrorCode = -1
)
const ( const (
_ ErrorCode = iota _ ErrorCode = iota
@@ -153,12 +159,12 @@ const (
/* decls > var (+ other variable assignment codes) */ /* decls > var (+ other variable assignment codes) */
// UntypedNil occurs when the predeclared (untyped) value nil is used to // UntypedNilUse occurs when the predeclared (untyped) value nil is used to
// initialize a variable declared without an explicit type. // initialize a variable declared without an explicit type.
// //
// Example: // Example:
// var x = nil // var x = nil
UntypedNil UntypedNilUse
// WrongAssignCount occurs when the number of values on the right-hand side // WrongAssignCount occurs when the number of values on the right-hand side
// of an assignment or initialization expression does not match the number // of an assignment or initialization expression does not match the number
@@ -1523,4 +1529,32 @@ const (
// Example: // Example:
// type T[P any] struct{ *P } // type T[P any] struct{ *P }
MisplacedTypeParam MisplacedTypeParam
// InvalidUnsafeSliceData occurs when unsafe.SliceData is called with
// an argument that is not of slice type. It also occurs if it is used
// in a package compiled for a language version before go1.20.
//
// Example:
// import "unsafe"
//
// var x int
// var _ = unsafe.SliceData(x)
InvalidUnsafeSliceData
// InvalidUnsafeString occurs when unsafe.String is called with
// a length argument that is not of integer type, negative, or
// out of bounds. It also occurs if it is used in a package
// compiled for a language version before go1.20.
//
// Example:
// import "unsafe"
//
// var b [10]byte
// var _ = unsafe.String(&b[0], -1)
InvalidUnsafeString
// InvalidUnsafeStringData occurs if it is used in a package
// compiled for a language version before go1.20.
_ // not used anymore
) )


@@ -8,6 +8,7 @@ func _() {
// An "invalid array index" compiler error signifies that the constant values have changed. // An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again. // Re-run the stringer command to generate them again.
var x [1]struct{} var x [1]struct{}
_ = x[InvalidSyntaxTree - -1]
_ = x[Test-1] _ = x[Test-1]
_ = x[BlankPkgName-2] _ = x[BlankPkgName-2]
_ = x[MismatchedPkgName-3] _ = x[MismatchedPkgName-3]
@@ -23,7 +24,7 @@ func _() {
_ = x[InvalidConstInit-13] _ = x[InvalidConstInit-13]
_ = x[InvalidConstVal-14] _ = x[InvalidConstVal-14]
_ = x[InvalidConstType-15] _ = x[InvalidConstType-15]
_ = x[UntypedNil-16] _ = x[UntypedNilUse-16]
_ = x[WrongAssignCount-17] _ = x[WrongAssignCount-17]
_ = x[UnassignableOperand-18] _ = x[UnassignableOperand-18]
_ = x[NoNewVar-19] _ = x[NoNewVar-19]
@@ -152,16 +153,27 @@ func _() {
_ = x[MisplacedConstraintIface-142] _ = x[MisplacedConstraintIface-142]
_ = x[InvalidMethodTypeParams-143] _ = x[InvalidMethodTypeParams-143]
_ = x[MisplacedTypeParam-144] _ = x[MisplacedTypeParam-144]
_ = x[InvalidUnsafeSliceData-145]
_ = x[InvalidUnsafeString-146]
} }
const _ErrorCode_name = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParam" const (
_ErrorCode_name_0 = "InvalidSyntaxTree"
_ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString"
)
var _ErrorCode_index = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 215, 231, 250, 258, 274, 292, 309, 327, 351, 359, 374, 390, 408, 425, 440, 447, 458, 481, 496, 508, 519, 534, 548, 563, 578, 591, 600, 614, 629, 640, 655, 664, 680, 700, 718, 737, 749, 768, 787, 803, 820, 839, 853, 864, 879, 892, 907, 923, 937, 953, 968, 985, 1003, 1018, 1028, 1038, 1055, 1077, 1091, 1105, 1125, 1143, 1163, 1181, 1204, 1220, 1235, 1248, 1258, 1270, 1281, 1295, 1308, 1319, 1329, 1344, 1355, 1366, 1379, 1395, 1412, 1436, 1453, 1468, 1478, 1487, 1500, 1516, 1532, 1543, 1558, 1574, 1588, 1604, 1618, 1635, 1655, 1668, 1684, 1698, 1715, 1732, 1749, 1764, 1778, 1792, 1803, 1815, 1828, 1845, 1858, 1869, 1882, 1894, 1903, 1910, 1922, 1938, 1956, 1974, 1989, 2006, 2025, 2039, 2059, 2071, 2095, 2118, 2136} var (
_ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180}
)
func (i ErrorCode) String() string { func (i ErrorCode) String() string {
i -= 1 switch {
if i < 0 || i >= ErrorCode(len(_ErrorCode_index)-1) { case i == -1:
return "ErrorCode(" + strconv.FormatInt(int64(i+1), 10) + ")" return _ErrorCode_name_0
case 1 <= i && i <= 146:
i -= 1
return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]]
default:
return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")"
} }
return _ErrorCode_name[_ErrorCode_index[i]:_ErrorCode_index[i+1]]
} }
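// Illustrative results (assumed, derived from the tables above):
func exampleErrorCodeString() {
	_ = InvalidSyntaxTree.String() // "InvalidSyntaxTree"
	_ = UntypedNilUse.String()     // "UntypedNilUse"
	_ = ErrorCode(999).String()    // "ErrorCode(999)"
}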

go/vendor/modules.txt vendored

@@ -1,25 +1,26 @@
# golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 # golang.org/x/mod v0.8.0
## explicit; go 1.17 ## explicit; go 1.17
golang.org/x/mod/internal/lazyregexp golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile golang.org/x/mod/modfile
golang.org/x/mod/module golang.org/x/mod/module
golang.org/x/mod/semver golang.org/x/mod/semver
# golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f # golang.org/x/sys v0.5.0
## explicit; go 1.17 ## explicit; go 1.17
golang.org/x/sys/execabs golang.org/x/sys/execabs
# golang.org/x/tools v0.1.12 # golang.org/x/tools v0.6.0
## explicit; go 1.18 ## explicit; go 1.18
golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/gcexportdata
golang.org/x/tools/go/internal/gcimporter
golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/go/internal/packagesdriver
golang.org/x/tools/go/internal/pkgbits
golang.org/x/tools/go/packages golang.org/x/tools/go/packages
golang.org/x/tools/internal/event golang.org/x/tools/internal/event
golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/core
golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/keys
golang.org/x/tools/internal/event/label golang.org/x/tools/internal/event/label
golang.org/x/tools/internal/gcimporter
golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gocommand
golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/packagesinternal
golang.org/x/tools/internal/pkgbits
golang.org/x/tools/internal/tokeninternal
golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/typesinternal
# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 # golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1