@hoorayimhelping
Created May 4, 2020 16:44
diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go
index 2cca29235..c195caaf8 100644
--- a/vendor/golang.org/x/tools/cmd/goimports/goimports.go
+++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go
@@ -160,7 +160,12 @@ func processFile(filename string, in io.Reader, out io.Writer, argType argumentT
// filename is "<standard input>"
return errors.New("can't use -w on stdin")
}
- err = ioutil.WriteFile(filename, res, 0)
+ // On Windows, we need to re-set the permissions from the file. See golang/go#38225.
+ var perms os.FileMode
+ if fi, err := os.Stat(filename); err == nil {
+ perms = fi.Mode() & os.ModePerm
+ }
+ err = ioutil.WriteFile(filename, res, perms)
if err != nil {
return err
}
@@ -262,7 +267,7 @@ func gofmtMain() {
if verbose {
log.SetFlags(log.LstdFlags | log.Lmicroseconds)
- options.Env.Debug = true
+ options.Env.Logf = log.Printf
}
if options.TabWidth < 0 {
fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", options.TabWidth)
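
Aside: a minimal standalone sketch of the permission-preserving write that the goimports hunk above introduces. The helper name rewriteFile is invented for illustration; the Stat/WriteFile sequence mirrors the patch, which stops passing a hard-coded mode of 0 so that Windows keeps the file writable (golang/go#38225).

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// rewriteFile looks up the file's current permission bits and hands them to
// WriteFile instead of 0, matching the patched processFile behavior.
func rewriteFile(filename string, content []byte) error {
	var perms os.FileMode // stays 0 if the file cannot be stat'ed
	if fi, err := os.Stat(filename); err == nil {
		perms = fi.Mode() & os.ModePerm
	}
	return ioutil.WriteFile(filename, content, perms)
}

func main() {
	tmp, err := ioutil.TempFile("", "demo-*.go")
	if err != nil {
		panic(err)
	}
	tmp.Close()
	defer os.Remove(tmp.Name())
	if err := rewriteFile(tmp.Name(), []byte("package demo\n")); err != nil {
		panic(err)
	}
	fmt.Println("rewrote", tmp.Name())
}
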
diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go
index e1468693f..a5cd587e4 100644
--- a/vendor/golang.org/x/tools/cmd/stringer/stringer.go
+++ b/vendor/golang.org/x/tools/cmd/stringer/stringer.go
@@ -610,7 +610,12 @@ func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) {
g.Printf("\t\treturn _%s_name_%d\n", typeName, i)
continue
}
- g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1])
+ if values[0].value == 0 && !values[0].signed {
+ // For an unsigned lower bound of 0, "0 <= i" would be redundant.
+ g.Printf("\tcase i <= %s:\n", &values[len(values)-1])
+ } else {
+ g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1])
+ }
if values[0].value != 0 {
g.Printf("\t\ti -= %s\n", &values[0])
}
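
Aside: roughly the shape of the String method that stringer emits for a type with multiple runs of values, reflecting the change above; for an unsigned type whose first run starts at 0, the redundant "0 <= i" half of the range check is omitted. The State type and its _State_* tables below are invented for illustration.

package main

import "fmt"

type State uint

const (
	Idle    State = 0
	Running State = 1
	Stopped State = 10
	Failed  State = 11
)

const (
	_State_name_0 = "IdleRunning"
	_State_name_1 = "StoppedFailed"
)

var (
	_State_index_0 = [...]uint8{0, 4, 11}
	_State_index_1 = [...]uint8{0, 7, 13}
)

func (i State) String() string {
	switch {
	case i <= 1: // unsigned first run starting at 0: lower bound omitted
		return _State_name_0[_State_index_0[i]:_State_index_0[i+1]]
	case 10 <= i && i <= 11:
		i -= 10
		return _State_name_1[_State_index_1[i]:_State_index_1[i+1]]
	default:
		return fmt.Sprintf("State(%d)", i)
	}
}

func main() {
	fmt.Println(Stopped, Failed, State(5)) // Stopped Failed State(5)
}
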
diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go
index ea605f4fd..8c9977355 100644
--- a/vendor/golang.org/x/tools/go/analysis/analysis.go
+++ b/vendor/golang.org/x/tools/go/analysis/analysis.go
@@ -7,6 +7,8 @@ import (
"go/token"
"go/types"
"reflect"
+
+ "golang.org/x/tools/internal/analysisinternal"
)
// An Analyzer describes an analysis function and its options.
@@ -69,6 +71,17 @@ type Analyzer struct {
func (a *Analyzer) String() string { return a.Name }
+func init() {
+ // Set the analysisinternal functions to be able to pass type errors
+ // to the Pass type without modifying the go/analysis API.
+ analysisinternal.SetTypeErrors = func(p interface{}, errors []types.Error) {
+ p.(*Pass).typeErrors = errors
+ }
+ analysisinternal.GetTypeErrors = func(p interface{}) []types.Error {
+ return p.(*Pass).typeErrors
+ }
+}
+
// A Pass provides information to the Run function that
// applies a specific analyzer to a single Go package.
//
@@ -138,6 +151,9 @@ type Pass struct {
// WARNING: This is an experimental API and may change in the future.
AllObjectFacts func() []ObjectFact
+ // typeErrors contains types.Errors that are associated with the pkg.
+ typeErrors []types.Error
+
/* Further fields may be added in future. */
// For example, suggested or applied refactorings.
}
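
Aside: a single-file sketch of the hook pattern the init function above relies on. In x/tools the function variables live in the internal package analysisinternal so that type errors can be attached to a Pass without widening the public go/analysis API; here everything is collapsed into one package purely so the sketch compiles on its own.

package main

import "fmt"

// Stand-ins for analysisinternal.SetTypeErrors / GetTypeErrors, declared in
// the same package here only so this sketch is self-contained.
var (
	setTypeErrors func(p interface{}, errs []string)
	getTypeErrors func(p interface{}) []string
)

// Pass mirrors analysis.Pass: the field is unexported, so only code holding
// the hooks can reach it.
type Pass struct {
	typeErrors []string
}

func init() {
	setTypeErrors = func(p interface{}, errs []string) { p.(*Pass).typeErrors = errs }
	getTypeErrors = func(p interface{}) []string { return p.(*Pass).typeErrors }
}

func main() {
	p := &Pass{}
	setTypeErrors(p, []string{"undeclared name: x"})
	fmt.Println(getTypeErrors(p)) // [undeclared name: x]
}
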
diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go
index 1b7b7ed5a..ea56b724e 100644
--- a/vendor/golang.org/x/tools/go/analysis/doc.go
+++ b/vendor/golang.org/x/tools/go/analysis/doc.go
@@ -1,6 +1,6 @@
/*
-The analysis package defines the interface between a modular static
+Package analysis defines the interface between a modular static
analysis and an analysis driver program.
@@ -70,39 +70,6 @@ A driver may use the name, flags, and documentation to provide on-line
help that describes the analyses it performs.
The doc comment contains a brief one-line summary,
optionally followed by paragraphs of explanation.
-The vet command, shown below, is an example of a driver that runs
-multiple analyzers. It is based on the multichecker package
-(see the "Standalone commands" section for details).
-
- $ go build golang.org/x/tools/go/analysis/cmd/vet
- $ ./vet help
- vet is a tool for static analysis of Go programs.
-
- Usage: vet [-flag] [package]
-
- Registered analyzers:
-
- asmdecl report mismatches between assembly files and Go declarations
- assign check for useless assignments
- atomic check for common mistakes using the sync/atomic package
- ...
- unusedresult check for unused results of calls to some functions
-
- $ ./vet help unusedresult
- unusedresult: check for unused results of calls to some functions
-
- Analyzer flags:
-
- -unusedresult.funcs value
- comma-separated list of functions whose results must be used (default Error,String)
- -unusedresult.stringmethods value
- comma-separated list of names of methods of type func() string whose results must be used
-
- Some functions like fmt.Errorf return a result and have no side effects,
- so it is always a mistake to discard the result. This analyzer reports
- calls to certain functions in which the result of the call is ignored.
-
- The set of functions may be controlled using flags.
The Analyzer type has more fields besides those shown above:
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go
index 3e4b19536..2087ceec9 100644
--- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go
+++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go
@@ -275,9 +275,10 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del
// We deleted an entry but now there may be
// a blank line-sized hole where the import was.
- if line-lastLine > 1 {
+ if line-lastLine > 1 || !gen.Rparen.IsValid() {
// There was a blank line immediately preceding the deleted import,
- // so there's no need to close the hole.
+ // so there's no need to close the hole. The right parenthesis is
+ // invalid after AddImport to an import statement without parenthesis.
// Do nothing.
} else if line != fset.File(gen.Rparen).LineCount() {
// There was no blank line. Close the hole.
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
index ddbdd3f08..af5e17fee 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
@@ -90,7 +90,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
// The types argument, if non-empty, enables type-based filtering of
// events. The function f is called only for nodes whose type
// matches an element of the types slice.
-func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (prune bool)) {
+func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
mask := maskOf(types)
for i := 0; i < len(in.events); {
ev := in.events[i]
@@ -114,7 +114,7 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (prun
// supplies each call to f an additional argument, the current
// traversal stack. The stack's first element is the outermost node,
// an *ast.File; its last is the innermost, n.
-func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (prune bool)) {
+func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
mask := maskOf(types)
var stack []ast.Node
for i := 0; i < len(in.events); {
@@ -150,7 +150,11 @@ func traverse(files []*ast.File) []event {
extent += int(f.End() - f.Pos())
}
// This estimate is based on the net/http package.
- events := make([]event, 0, extent*33/100)
+ capacity := extent * 33 / 100
+ if capacity > 1e6 {
+ capacity = 1e6 // impose some reasonable maximum
+ }
+ events := make([]event, 0, capacity)
var stack []event
for _, f := range files {
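
Aside: a short usage sketch of Inspector.Nodes, whose result parameter the hunk above renames. Returning false from the callback on a push event means "do not proceed into this subtree"; it does not abort the whole traversal, which is why proceed is a more accurate name than prune. The demo source is invented.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

const src = `package demo

func outer() {
	inner := func() { println("skipped") }
	inner()
}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{f})
	in.Nodes([]ast.Node{(*ast.FuncLit)(nil), (*ast.CallExpr)(nil)}, func(n ast.Node, push bool) bool {
		if !push {
			return true
		}
		if _, ok := n.(*ast.FuncLit); ok {
			return false // do not proceed into function literals
		}
		fmt.Printf("call at %s\n", fset.Position(n.Pos()))
		return true
	})
}
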
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
index f8363d8fa..0d51acad9 100644
--- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -85,15 +85,11 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
}
- // The indexed export format starts with an 'i'; the older
- // binary export format starts with a 'c', 'd', or 'v'
- // (from "version"). Select appropriate importer.
- if len(data) > 0 && data[0] == 'i' {
- _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
- return pkg, err
+ // The indexed export format starts with an 'i'.
+ if len(data) == 0 || data[0] != 'i' {
+ return nil, fmt.Errorf("unknown export data format")
}
-
- _, pkg, err := gcimporter.BImportData(fset, imports, data, path)
+ _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
return pkg, err
}
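
Aside: a hedged sketch of calling the Read function changed above. After this patch it accepts only the indexed ('i') export format and reports an error for the legacy binary formats instead of falling back to BImportData. Whether Find actually locates export data for "fmt" depends on the local toolchain and build layout.

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatal("export data for fmt not found")
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f) // strips the archive header
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err) // e.g. "unknown export data format" for legacy data
	}
	fmt.Println(pkg.Path(), pkg.Scope().Len())
}
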
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go
deleted file mode 100644
index a807d0aaa..000000000
--- a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go
+++ /dev/null
@@ -1,852 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Binary package export.
-// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
-// see that file for specification of the format.
-
-package gcimporter
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "go/ast"
- "go/constant"
- "go/token"
- "go/types"
- "math"
- "math/big"
- "sort"
- "strings"
-)
-
-// If debugFormat is set, each integer and string value is preceded by a marker
-// and position information in the encoding. This mechanism permits an importer
-// to recognize immediately when it is out of sync. The importer recognizes this
-// mode automatically (i.e., it can import export data produced with debugging
-// support even if debugFormat is not set at the time of import). This mode will
-// lead to massively larger export data (by a factor of 2 to 3) and should only
-// be enabled during development and debugging.
-//
-// NOTE: This flag is the first flag to enable if importing dies because of
-// (suspected) format errors, and whenever a change is made to the format.
-const debugFormat = false // default: false
-
-// If trace is set, debugging output is printed to std out.
-const trace = false // default: false
-
-// Current export format version. Increase with each format change.
-// Note: The latest binary (non-indexed) export format is at version 6.
-// This exporter is still at level 4, but it doesn't matter since
-// the binary importer can handle older versions just fine.
-// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
-// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
-// 4: type name objects support type aliases, uses aliasTag
-// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
-// 2: removed unused bool in ODCL export (compiler only)
-// 1: header format change (more regular), export package for _ struct fields
-// 0: Go1.7 encoding
-const exportVersion = 4
-
-// trackAllTypes enables cycle tracking for all types, not just named
-// types. The existing compiler invariants assume that unnamed types
-// that are not completely set up are not used, or else there are spurious
-// errors.
-// If disabled, only named types are tracked, possibly leading to slightly
-// less efficient encoding in rare cases. It also prevents the export of
-// some corner-case type declarations (but those are not handled correctly
-// with the textual export format either).
-// TODO(gri) enable and remove once issues caused by it are fixed
-const trackAllTypes = false
-
-type exporter struct {
- fset *token.FileSet
- out bytes.Buffer
-
- // object -> index maps, indexed in order of serialization
- strIndex map[string]int
- pkgIndex map[*types.Package]int
- typIndex map[types.Type]int
-
- // position encoding
- posInfoFormat bool
- prevFile string
- prevLine int
-
- // debugging support
- written int // bytes written
- indent int // for trace
-}
-
-// internalError represents an error generated inside this package.
-type internalError string
-
-func (e internalError) Error() string { return "gcimporter: " + string(e) }
-
-func internalErrorf(format string, args ...interface{}) error {
- return internalError(fmt.Sprintf(format, args...))
-}
-
-// BExportData returns binary export data for pkg.
-// If no file set is provided, position info will be missing.
-func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
- defer func() {
- if e := recover(); e != nil {
- if ierr, ok := e.(internalError); ok {
- err = ierr
- return
- }
- // Not an internal error; panic again.
- panic(e)
- }
- }()
-
- p := exporter{
- fset: fset,
- strIndex: map[string]int{"": 0}, // empty string is mapped to 0
- pkgIndex: make(map[*types.Package]int),
- typIndex: make(map[types.Type]int),
- posInfoFormat: true, // TODO(gri) might become a flag, eventually
- }
-
- // write version info
- // The version string must start with "version %d" where %d is the version
- // number. Additional debugging information may follow after a blank; that
- // text is ignored by the importer.
- p.rawStringln(fmt.Sprintf("version %d", exportVersion))
- var debug string
- if debugFormat {
- debug = "debug"
- }
- p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
- p.bool(trackAllTypes)
- p.bool(p.posInfoFormat)
-
- // --- generic export data ---
-
- // populate type map with predeclared "known" types
- for index, typ := range predeclared() {
- p.typIndex[typ] = index
- }
- if len(p.typIndex) != len(predeclared()) {
- return nil, internalError("duplicate entries in type map?")
- }
-
- // write package data
- p.pkg(pkg, true)
- if trace {
- p.tracef("\n")
- }
-
- // write objects
- objcount := 0
- scope := pkg.Scope()
- for _, name := range scope.Names() {
- if !ast.IsExported(name) {
- continue
- }
- if trace {
- p.tracef("\n")
- }
- p.obj(scope.Lookup(name))
- objcount++
- }
-
- // indicate end of list
- if trace {
- p.tracef("\n")
- }
- p.tag(endTag)
-
- // for self-verification only (redundant)
- p.int(objcount)
-
- if trace {
- p.tracef("\n")
- }
-
- // --- end of export data ---
-
- return p.out.Bytes(), nil
-}
-
-func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
- if pkg == nil {
- panic(internalError("unexpected nil pkg"))
- }
-
- // if we saw the package before, write its index (>= 0)
- if i, ok := p.pkgIndex[pkg]; ok {
- p.index('P', i)
- return
- }
-
- // otherwise, remember the package, write the package tag (< 0) and package data
- if trace {
- p.tracef("P%d = { ", len(p.pkgIndex))
- defer p.tracef("} ")
- }
- p.pkgIndex[pkg] = len(p.pkgIndex)
-
- p.tag(packageTag)
- p.string(pkg.Name())
- if emptypath {
- p.string("")
- } else {
- p.string(pkg.Path())
- }
-}
-
-func (p *exporter) obj(obj types.Object) {
- switch obj := obj.(type) {
- case *types.Const:
- p.tag(constTag)
- p.pos(obj)
- p.qualifiedName(obj)
- p.typ(obj.Type())
- p.value(obj.Val())
-
- case *types.TypeName:
- if obj.IsAlias() {
- p.tag(aliasTag)
- p.pos(obj)
- p.qualifiedName(obj)
- } else {
- p.tag(typeTag)
- }
- p.typ(obj.Type())
-
- case *types.Var:
- p.tag(varTag)
- p.pos(obj)
- p.qualifiedName(obj)
- p.typ(obj.Type())
-
- case *types.Func:
- p.tag(funcTag)
- p.pos(obj)
- p.qualifiedName(obj)
- sig := obj.Type().(*types.Signature)
- p.paramList(sig.Params(), sig.Variadic())
- p.paramList(sig.Results(), false)
-
- default:
- panic(internalErrorf("unexpected object %v (%T)", obj, obj))
- }
-}
-
-func (p *exporter) pos(obj types.Object) {
- if !p.posInfoFormat {
- return
- }
-
- file, line := p.fileLine(obj)
- if file == p.prevFile {
- // common case: write line delta
- // delta == 0 means different file or no line change
- delta := line - p.prevLine
- p.int(delta)
- if delta == 0 {
- p.int(-1) // -1 means no file change
- }
- } else {
- // different file
- p.int(0)
- // Encode filename as length of common prefix with previous
- // filename, followed by (possibly empty) suffix. Filenames
- // frequently share path prefixes, so this can save a lot
- // of space and make export data size less dependent on file
- // path length. The suffix is unlikely to be empty because
- // file names tend to end in ".go".
- n := commonPrefixLen(p.prevFile, file)
- p.int(n) // n >= 0
- p.string(file[n:]) // write suffix only
- p.prevFile = file
- p.int(line)
- }
- p.prevLine = line
-}
-
-func (p *exporter) fileLine(obj types.Object) (file string, line int) {
- if p.fset != nil {
- pos := p.fset.Position(obj.Pos())
- file = pos.Filename
- line = pos.Line
- }
- return
-}
-
-func commonPrefixLen(a, b string) int {
- if len(a) > len(b) {
- a, b = b, a
- }
- // len(a) <= len(b)
- i := 0
- for i < len(a) && a[i] == b[i] {
- i++
- }
- return i
-}
-
-func (p *exporter) qualifiedName(obj types.Object) {
- p.string(obj.Name())
- p.pkg(obj.Pkg(), false)
-}
-
-func (p *exporter) typ(t types.Type) {
- if t == nil {
- panic(internalError("nil type"))
- }
-
- // Possible optimization: Anonymous pointer types *T where
- // T is a named type are common. We could canonicalize all
- // such types *T to a single type PT = *T. This would lead
- // to at most one *T entry in typIndex, and all future *T's
- // would be encoded as the respective index directly. Would
- // save 1 byte (pointerTag) per *T and reduce the typIndex
- // size (at the cost of a canonicalization map). We can do
- // this later, without encoding format change.
-
- // if we saw the type before, write its index (>= 0)
- if i, ok := p.typIndex[t]; ok {
- p.index('T', i)
- return
- }
-
- // otherwise, remember the type, write the type tag (< 0) and type data
- if trackAllTypes {
- if trace {
- p.tracef("T%d = {>\n", len(p.typIndex))
- defer p.tracef("<\n} ")
- }
- p.typIndex[t] = len(p.typIndex)
- }
-
- switch t := t.(type) {
- case *types.Named:
- if !trackAllTypes {
- // if we don't track all types, track named types now
- p.typIndex[t] = len(p.typIndex)
- }
-
- p.tag(namedTag)
- p.pos(t.Obj())
- p.qualifiedName(t.Obj())
- p.typ(t.Underlying())
- if !types.IsInterface(t) {
- p.assocMethods(t)
- }
-
- case *types.Array:
- p.tag(arrayTag)
- p.int64(t.Len())
- p.typ(t.Elem())
-
- case *types.Slice:
- p.tag(sliceTag)
- p.typ(t.Elem())
-
- case *dddSlice:
- p.tag(dddTag)
- p.typ(t.elem)
-
- case *types.Struct:
- p.tag(structTag)
- p.fieldList(t)
-
- case *types.Pointer:
- p.tag(pointerTag)
- p.typ(t.Elem())
-
- case *types.Signature:
- p.tag(signatureTag)
- p.paramList(t.Params(), t.Variadic())
- p.paramList(t.Results(), false)
-
- case *types.Interface:
- p.tag(interfaceTag)
- p.iface(t)
-
- case *types.Map:
- p.tag(mapTag)
- p.typ(t.Key())
- p.typ(t.Elem())
-
- case *types.Chan:
- p.tag(chanTag)
- p.int(int(3 - t.Dir())) // hack
- p.typ(t.Elem())
-
- default:
- panic(internalErrorf("unexpected type %T: %s", t, t))
- }
-}
-
-func (p *exporter) assocMethods(named *types.Named) {
- // Sort methods (for determinism).
- var methods []*types.Func
- for i := 0; i < named.NumMethods(); i++ {
- methods = append(methods, named.Method(i))
- }
- sort.Sort(methodsByName(methods))
-
- p.int(len(methods))
-
- if trace && methods != nil {
- p.tracef("associated methods {>\n")
- }
-
- for i, m := range methods {
- if trace && i > 0 {
- p.tracef("\n")
- }
-
- p.pos(m)
- name := m.Name()
- p.string(name)
- if !exported(name) {
- p.pkg(m.Pkg(), false)
- }
-
- sig := m.Type().(*types.Signature)
- p.paramList(types.NewTuple(sig.Recv()), false)
- p.paramList(sig.Params(), sig.Variadic())
- p.paramList(sig.Results(), false)
- p.int(0) // dummy value for go:nointerface pragma - ignored by importer
- }
-
- if trace && methods != nil {
- p.tracef("<\n} ")
- }
-}
-
-type methodsByName []*types.Func
-
-func (x methodsByName) Len() int { return len(x) }
-func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
-
-func (p *exporter) fieldList(t *types.Struct) {
- if trace && t.NumFields() > 0 {
- p.tracef("fields {>\n")
- defer p.tracef("<\n} ")
- }
-
- p.int(t.NumFields())
- for i := 0; i < t.NumFields(); i++ {
- if trace && i > 0 {
- p.tracef("\n")
- }
- p.field(t.Field(i))
- p.string(t.Tag(i))
- }
-}
-
-func (p *exporter) field(f *types.Var) {
- if !f.IsField() {
- panic(internalError("field expected"))
- }
-
- p.pos(f)
- p.fieldName(f)
- p.typ(f.Type())
-}
-
-func (p *exporter) iface(t *types.Interface) {
- // TODO(gri): enable importer to load embedded interfaces,
- // then emit Embeddeds and ExplicitMethods separately here.
- p.int(0)
-
- n := t.NumMethods()
- if trace && n > 0 {
- p.tracef("methods {>\n")
- defer p.tracef("<\n} ")
- }
- p.int(n)
- for i := 0; i < n; i++ {
- if trace && i > 0 {
- p.tracef("\n")
- }
- p.method(t.Method(i))
- }
-}
-
-func (p *exporter) method(m *types.Func) {
- sig := m.Type().(*types.Signature)
- if sig.Recv() == nil {
- panic(internalError("method expected"))
- }
-
- p.pos(m)
- p.string(m.Name())
- if m.Name() != "_" && !ast.IsExported(m.Name()) {
- p.pkg(m.Pkg(), false)
- }
-
- // interface method; no need to encode receiver.
- p.paramList(sig.Params(), sig.Variadic())
- p.paramList(sig.Results(), false)
-}
-
-func (p *exporter) fieldName(f *types.Var) {
- name := f.Name()
-
- if f.Anonymous() {
- // anonymous field - we distinguish between 3 cases:
- // 1) field name matches base type name and is exported
- // 2) field name matches base type name and is not exported
- // 3) field name doesn't match base type name (alias name)
- bname := basetypeName(f.Type())
- if name == bname {
- if ast.IsExported(name) {
- name = "" // 1) we don't need to know the field name or package
- } else {
- name = "?" // 2) use unexported name "?" to force package export
- }
- } else {
- // 3) indicate alias and export name as is
- // (this requires an extra "@" but this is a rare case)
- p.string("@")
- }
- }
-
- p.string(name)
- if name != "" && !ast.IsExported(name) {
- p.pkg(f.Pkg(), false)
- }
-}
-
-func basetypeName(typ types.Type) string {
- switch typ := deref(typ).(type) {
- case *types.Basic:
- return typ.Name()
- case *types.Named:
- return typ.Obj().Name()
- default:
- return "" // unnamed type
- }
-}
-
-func (p *exporter) paramList(params *types.Tuple, variadic bool) {
- // use negative length to indicate unnamed parameters
- // (look at the first parameter only since either all
- // names are present or all are absent)
- n := params.Len()
- if n > 0 && params.At(0).Name() == "" {
- n = -n
- }
- p.int(n)
- for i := 0; i < params.Len(); i++ {
- q := params.At(i)
- t := q.Type()
- if variadic && i == params.Len()-1 {
- t = &dddSlice{t.(*types.Slice).Elem()}
- }
- p.typ(t)
- if n > 0 {
- name := q.Name()
- p.string(name)
- if name != "_" {
- p.pkg(q.Pkg(), false)
- }
- }
- p.string("") // no compiler-specific info
- }
-}
-
-func (p *exporter) value(x constant.Value) {
- if trace {
- p.tracef("= ")
- }
-
- switch x.Kind() {
- case constant.Bool:
- tag := falseTag
- if constant.BoolVal(x) {
- tag = trueTag
- }
- p.tag(tag)
-
- case constant.Int:
- if v, exact := constant.Int64Val(x); exact {
- // common case: x fits into an int64 - use compact encoding
- p.tag(int64Tag)
- p.int64(v)
- return
- }
- // uncommon case: large x - use float encoding
- // (powers of 2 will be encoded efficiently with exponent)
- p.tag(floatTag)
- p.float(constant.ToFloat(x))
-
- case constant.Float:
- p.tag(floatTag)
- p.float(x)
-
- case constant.Complex:
- p.tag(complexTag)
- p.float(constant.Real(x))
- p.float(constant.Imag(x))
-
- case constant.String:
- p.tag(stringTag)
- p.string(constant.StringVal(x))
-
- case constant.Unknown:
- // package contains type errors
- p.tag(unknownTag)
-
- default:
- panic(internalErrorf("unexpected value %v (%T)", x, x))
- }
-}
-
-func (p *exporter) float(x constant.Value) {
- if x.Kind() != constant.Float {
- panic(internalErrorf("unexpected constant %v, want float", x))
- }
- // extract sign (there is no -0)
- sign := constant.Sign(x)
- if sign == 0 {
- // x == 0
- p.int(0)
- return
- }
- // x != 0
-
- var f big.Float
- if v, exact := constant.Float64Val(x); exact {
- // float64
- f.SetFloat64(v)
- } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
- // TODO(gri): add big.Rat accessor to constant.Value.
- r := valueToRat(num)
- f.SetRat(r.Quo(r, valueToRat(denom)))
- } else {
- // Value too large to represent as a fraction => inaccessible.
- // TODO(gri): add big.Float accessor to constant.Value.
- f.SetFloat64(math.MaxFloat64) // FIXME
- }
-
- // extract exponent such that 0.5 <= m < 1.0
- var m big.Float
- exp := f.MantExp(&m)
-
- // extract mantissa as *big.Int
- // - set exponent large enough so mant satisfies mant.IsInt()
- // - get *big.Int from mant
- m.SetMantExp(&m, int(m.MinPrec()))
- mant, acc := m.Int(nil)
- if acc != big.Exact {
- panic(internalError("internal error"))
- }
-
- p.int(sign)
- p.int(exp)
- p.string(string(mant.Bytes()))
-}
-
-func valueToRat(x constant.Value) *big.Rat {
- // Convert little-endian to big-endian.
- // I can't believe this is necessary.
- bytes := constant.Bytes(x)
- for i := 0; i < len(bytes)/2; i++ {
- bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
- }
- return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
-}
-
-func (p *exporter) bool(b bool) bool {
- if trace {
- p.tracef("[")
- defer p.tracef("= %v] ", b)
- }
-
- x := 0
- if b {
- x = 1
- }
- p.int(x)
- return b
-}
-
-// ----------------------------------------------------------------------------
-// Low-level encoders
-
-func (p *exporter) index(marker byte, index int) {
- if index < 0 {
- panic(internalError("invalid index < 0"))
- }
- if debugFormat {
- p.marker('t')
- }
- if trace {
- p.tracef("%c%d ", marker, index)
- }
- p.rawInt64(int64(index))
-}
-
-func (p *exporter) tag(tag int) {
- if tag >= 0 {
- panic(internalError("invalid tag >= 0"))
- }
- if debugFormat {
- p.marker('t')
- }
- if trace {
- p.tracef("%s ", tagString[-tag])
- }
- p.rawInt64(int64(tag))
-}
-
-func (p *exporter) int(x int) {
- p.int64(int64(x))
-}
-
-func (p *exporter) int64(x int64) {
- if debugFormat {
- p.marker('i')
- }
- if trace {
- p.tracef("%d ", x)
- }
- p.rawInt64(x)
-}
-
-func (p *exporter) string(s string) {
- if debugFormat {
- p.marker('s')
- }
- if trace {
- p.tracef("%q ", s)
- }
- // if we saw the string before, write its index (>= 0)
- // (the empty string is mapped to 0)
- if i, ok := p.strIndex[s]; ok {
- p.rawInt64(int64(i))
- return
- }
- // otherwise, remember string and write its negative length and bytes
- p.strIndex[s] = len(p.strIndex)
- p.rawInt64(-int64(len(s)))
- for i := 0; i < len(s); i++ {
- p.rawByte(s[i])
- }
-}
-
-// marker emits a marker byte and position information which makes
-// it easy for a reader to detect if it is "out of sync". Used for
-// debugFormat format only.
-func (p *exporter) marker(m byte) {
- p.rawByte(m)
- // Enable this for help tracking down the location
- // of an incorrect marker when running in debugFormat.
- if false && trace {
- p.tracef("#%d ", p.written)
- }
- p.rawInt64(int64(p.written))
-}
-
-// rawInt64 should only be used by low-level encoders.
-func (p *exporter) rawInt64(x int64) {
- var tmp [binary.MaxVarintLen64]byte
- n := binary.PutVarint(tmp[:], x)
- for i := 0; i < n; i++ {
- p.rawByte(tmp[i])
- }
-}
-
-// rawStringln should only be used to emit the initial version string.
-func (p *exporter) rawStringln(s string) {
- for i := 0; i < len(s); i++ {
- p.rawByte(s[i])
- }
- p.rawByte('\n')
-}
-
-// rawByte is the bottleneck interface to write to p.out.
-// rawByte escapes b as follows (any encoding does that
-// hides '$'):
-//
-// '$' => '|' 'S'
-// '|' => '|' '|'
-//
-// Necessary so other tools can find the end of the
-// export data by searching for "$$".
-// rawByte should only be used by low-level encoders.
-func (p *exporter) rawByte(b byte) {
- switch b {
- case '$':
- // write '$' as '|' 'S'
- b = 'S'
- fallthrough
- case '|':
- // write '|' as '|' '|'
- p.out.WriteByte('|')
- p.written++
- }
- p.out.WriteByte(b)
- p.written++
-}
-
-// tracef is like fmt.Printf but it rewrites the format string
-// to take care of indentation.
-func (p *exporter) tracef(format string, args ...interface{}) {
- if strings.ContainsAny(format, "<>\n") {
- var buf bytes.Buffer
- for i := 0; i < len(format); i++ {
- // no need to deal with runes
- ch := format[i]
- switch ch {
- case '>':
- p.indent++
- continue
- case '<':
- p.indent--
- continue
- }
- buf.WriteByte(ch)
- if ch == '\n' {
- for j := p.indent; j > 0; j-- {
- buf.WriteString(". ")
- }
- }
- }
- format = buf.String()
- }
- fmt.Printf(format, args...)
-}
-
-// Debugging support.
-// (tagString is only used when tracing is enabled)
-var tagString = [...]string{
- // Packages
- -packageTag: "package",
-
- // Types
- -namedTag: "named type",
- -arrayTag: "array",
- -sliceTag: "slice",
- -dddTag: "ddd",
- -structTag: "struct",
- -pointerTag: "pointer",
- -signatureTag: "signature",
- -interfaceTag: "interface",
- -mapTag: "map",
- -chanTag: "chan",
-
- // Values
- -falseTag: "false",
- -trueTag: "true",
- -int64Tag: "int64",
- -floatTag: "float",
- -fractionTag: "fraction",
- -complexTag: "complex",
- -stringTag: "string",
- -unknownTag: "unknown",
-
- // Type aliases
- -aliasTag: "alias",
-}
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go
deleted file mode 100644
index e9f73d14a..000000000
--- a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go
+++ /dev/null
@@ -1,1039 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go.
-
-package gcimporter
-
-import (
- "encoding/binary"
- "fmt"
- "go/constant"
- "go/token"
- "go/types"
- "sort"
- "strconv"
- "strings"
- "sync"
- "unicode"
- "unicode/utf8"
-)
-
-type importer struct {
- imports map[string]*types.Package
- data []byte
- importpath string
- buf []byte // for reading strings
- version int // export format version
-
- // object lists
- strList []string // in order of appearance
- pathList []string // in order of appearance
- pkgList []*types.Package // in order of appearance
- typList []types.Type // in order of appearance
- interfaceList []*types.Interface // for delayed completion only
- trackAllTypes bool
-
- // position encoding
- posInfoFormat bool
- prevFile string
- prevLine int
- fake fakeFileSet
-
- // debugging support
- debugFormat bool
- read int // bytes read
-}
-
-// BImportData imports a package from the serialized package data
-// and returns the number of bytes consumed and a reference to the package.
-// If the export data version is not recognized or the format is otherwise
-// compromised, an error is returned.
-func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
- // catch panics and return them as errors
- const currentVersion = 6
- version := -1 // unknown version
- defer func() {
- if e := recover(); e != nil {
- // Return a (possibly nil or incomplete) package unchanged (see #16088).
- if version > currentVersion {
- err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
- } else {
- err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
- }
- }
- }()
-
- p := importer{
- imports: imports,
- data: data,
- importpath: path,
- version: version,
- strList: []string{""}, // empty string is mapped to 0
- pathList: []string{""}, // empty string is mapped to 0
- fake: fakeFileSet{
- fset: fset,
- files: make(map[string]*token.File),
- },
- }
-
- // read version info
- var versionstr string
- if b := p.rawByte(); b == 'c' || b == 'd' {
- // Go1.7 encoding; first byte encodes low-level
- // encoding format (compact vs debug).
- // For backward-compatibility only (avoid problems with
- // old installed packages). Newly compiled packages use
- // the extensible format string.
- // TODO(gri) Remove this support eventually; after Go1.8.
- if b == 'd' {
- p.debugFormat = true
- }
- p.trackAllTypes = p.rawByte() == 'a'
- p.posInfoFormat = p.int() != 0
- versionstr = p.string()
- if versionstr == "v1" {
- version = 0
- }
- } else {
- // Go1.8 extensible encoding
- // read version string and extract version number (ignore anything after the version number)
- versionstr = p.rawStringln(b)
- if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
- if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
- version = v
- }
- }
- }
- p.version = version
-
- // read version specific flags - extend as necessary
- switch p.version {
- // case currentVersion:
- // ...
- // fallthrough
- case currentVersion, 5, 4, 3, 2, 1:
- p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
- p.trackAllTypes = p.int() != 0
- p.posInfoFormat = p.int() != 0
- case 0:
- // Go1.7 encoding format - nothing to do here
- default:
- errorf("unknown bexport format version %d (%q)", p.version, versionstr)
- }
-
- // --- generic export data ---
-
- // populate typList with predeclared "known" types
- p.typList = append(p.typList, predeclared()...)
-
- // read package data
- pkg = p.pkg()
-
- // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go)
- objcount := 0
- for {
- tag := p.tagOrIndex()
- if tag == endTag {
- break
- }
- p.obj(tag)
- objcount++
- }
-
- // self-verification
- if count := p.int(); count != objcount {
- errorf("got %d objects; want %d", objcount, count)
- }
-
- // ignore compiler-specific import data
-
- // complete interfaces
- // TODO(gri) re-investigate if we still need to do this in a delayed fashion
- for _, typ := range p.interfaceList {
- typ.Complete()
- }
-
- // record all referenced packages as imports
- list := append(([]*types.Package)(nil), p.pkgList[1:]...)
- sort.Sort(byPath(list))
- pkg.SetImports(list)
-
- // package was imported completely and without errors
- pkg.MarkComplete()
-
- return p.read, pkg, nil
-}
-
-func errorf(format string, args ...interface{}) {
- panic(fmt.Sprintf(format, args...))
-}
-
-func (p *importer) pkg() *types.Package {
- // if the package was seen before, i is its index (>= 0)
- i := p.tagOrIndex()
- if i >= 0 {
- return p.pkgList[i]
- }
-
- // otherwise, i is the package tag (< 0)
- if i != packageTag {
- errorf("unexpected package tag %d version %d", i, p.version)
- }
-
- // read package data
- name := p.string()
- var path string
- if p.version >= 5 {
- path = p.path()
- } else {
- path = p.string()
- }
- if p.version >= 6 {
- p.int() // package height; unused by go/types
- }
-
- // we should never see an empty package name
- if name == "" {
- errorf("empty package name in import")
- }
-
- // an empty path denotes the package we are currently importing;
- // it must be the first package we see
- if (path == "") != (len(p.pkgList) == 0) {
- errorf("package path %q for pkg index %d", path, len(p.pkgList))
- }
-
- // if the package was imported before, use that one; otherwise create a new one
- if path == "" {
- path = p.importpath
- }
- pkg := p.imports[path]
- if pkg == nil {
- pkg = types.NewPackage(path, name)
- p.imports[path] = pkg
- } else if pkg.Name() != name {
- errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path)
- }
- p.pkgList = append(p.pkgList, pkg)
-
- return pkg
-}
-
-// objTag returns the tag value for each object kind.
-func objTag(obj types.Object) int {
- switch obj.(type) {
- case *types.Const:
- return constTag
- case *types.TypeName:
- return typeTag
- case *types.Var:
- return varTag
- case *types.Func:
- return funcTag
- default:
- errorf("unexpected object: %v (%T)", obj, obj) // panics
- panic("unreachable")
- }
-}
-
-func sameObj(a, b types.Object) bool {
- // Because unnamed types are not canonicalized, we cannot simply compare types for
- // (pointer) identity.
- // Ideally we'd check equality of constant values as well, but this is good enough.
- return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type())
-}
-
-func (p *importer) declare(obj types.Object) {
- pkg := obj.Pkg()
- if alt := pkg.Scope().Insert(obj); alt != nil {
- // This can only trigger if we import a (non-type) object a second time.
- // Excluding type aliases, this cannot happen because 1) we only import a package
- // once; and 2) we ignore compiler-specific export data which may contain
- // functions whose inlined function bodies refer to other functions that
- // were already imported.
- // However, type aliases require reexporting the original type, so we need
- // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go,
- // method importer.obj, switch case importing functions).
- // TODO(gri) review/update this comment once the gc compiler handles type aliases.
- if !sameObj(obj, alt) {
- errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt)
- }
- }
-}
-
-func (p *importer) obj(tag int) {
- switch tag {
- case constTag:
- pos := p.pos()
- pkg, name := p.qualifiedName()
- typ := p.typ(nil, nil)
- val := p.value()
- p.declare(types.NewConst(pos, pkg, name, typ, val))
-
- case aliasTag:
- // TODO(gri) verify type alias hookup is correct
- pos := p.pos()
- pkg, name := p.qualifiedName()
- typ := p.typ(nil, nil)
- p.declare(types.NewTypeName(pos, pkg, name, typ))
-
- case typeTag:
- p.typ(nil, nil)
-
- case varTag:
- pos := p.pos()
- pkg, name := p.qualifiedName()
- typ := p.typ(nil, nil)
- p.declare(types.NewVar(pos, pkg, name, typ))
-
- case funcTag:
- pos := p.pos()
- pkg, name := p.qualifiedName()
- params, isddd := p.paramList()
- result, _ := p.paramList()
- sig := types.NewSignature(nil, params, result, isddd)
- p.declare(types.NewFunc(pos, pkg, name, sig))
-
- default:
- errorf("unexpected object tag %d", tag)
- }
-}
-
-const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
-
-func (p *importer) pos() token.Pos {
- if !p.posInfoFormat {
- return token.NoPos
- }
-
- file := p.prevFile
- line := p.prevLine
- delta := p.int()
- line += delta
- if p.version >= 5 {
- if delta == deltaNewFile {
- if n := p.int(); n >= 0 {
- // file changed
- file = p.path()
- line = n
- }
- }
- } else {
- if delta == 0 {
- if n := p.int(); n >= 0 {
- // file changed
- file = p.prevFile[:n] + p.string()
- line = p.int()
- }
- }
- }
- p.prevFile = file
- p.prevLine = line
-
- return p.fake.pos(file, line, 0)
-}
-
-// Synthesize a token.Pos
-type fakeFileSet struct {
- fset *token.FileSet
- files map[string]*token.File
-}
-
-func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
- // TODO(mdempsky): Make use of column.
-
- // Since we don't know the set of needed file positions, we
- // reserve maxlines positions per file.
- const maxlines = 64 * 1024
- f := s.files[file]
- if f == nil {
- f = s.fset.AddFile(file, -1, maxlines)
- s.files[file] = f
- // Allocate the fake linebreak indices on first use.
- // TODO(adonovan): opt: save ~512KB using a more complex scheme?
- fakeLinesOnce.Do(func() {
- fakeLines = make([]int, maxlines)
- for i := range fakeLines {
- fakeLines[i] = i
- }
- })
- f.SetLines(fakeLines)
- }
-
- if line > maxlines {
- line = 1
- }
-
- // Treat the file as if it contained only newlines
- // and column=1: use the line number as the offset.
- return f.Pos(line - 1)
-}
-
-var (
- fakeLines []int
- fakeLinesOnce sync.Once
-)
-
-func (p *importer) qualifiedName() (pkg *types.Package, name string) {
- name = p.string()
- pkg = p.pkg()
- return
-}
-
-func (p *importer) record(t types.Type) {
- p.typList = append(p.typList, t)
-}
-
-// A dddSlice is a types.Type representing ...T parameters.
-// It only appears for parameter types and does not escape
-// the importer.
-type dddSlice struct {
- elem types.Type
-}
-
-func (t *dddSlice) Underlying() types.Type { return t }
-func (t *dddSlice) String() string { return "..." + t.elem.String() }
-
-// parent is the package which declared the type; parent == nil means
-// the package currently imported. The parent package is needed for
-// exported struct fields and interface methods which don't contain
-// explicit package information in the export data.
-//
-// A non-nil tname is used as the "owner" of the result type; i.e.,
-// the result type is the underlying type of tname. tname is used
-// to give interface methods a named receiver type where possible.
-func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type {
- // if the type was seen before, i is its index (>= 0)
- i := p.tagOrIndex()
- if i >= 0 {
- return p.typList[i]
- }
-
- // otherwise, i is the type tag (< 0)
- switch i {
- case namedTag:
- // read type object
- pos := p.pos()
- parent, name := p.qualifiedName()
- scope := parent.Scope()
- obj := scope.Lookup(name)
-
- // if the object doesn't exist yet, create and insert it
- if obj == nil {
- obj = types.NewTypeName(pos, parent, name, nil)
- scope.Insert(obj)
- }
-
- if _, ok := obj.(*types.TypeName); !ok {
- errorf("pkg = %s, name = %s => %s", parent, name, obj)
- }
-
- // associate new named type with obj if it doesn't exist yet
- t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)
-
- // but record the existing type, if any
- tname := obj.Type().(*types.Named) // tname is either t0 or the existing type
- p.record(tname)
-
- // read underlying type
- t0.SetUnderlying(p.typ(parent, t0))
-
- // interfaces don't have associated methods
- if types.IsInterface(t0) {
- return tname
- }
-
- // read associated methods
- for i := p.int(); i > 0; i-- {
- // TODO(gri) replace this with something closer to fieldName
- pos := p.pos()
- name := p.string()
- if !exported(name) {
- p.pkg()
- }
-
- recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver?
- params, isddd := p.paramList()
- result, _ := p.paramList()
- p.int() // go:nointerface pragma - discarded
-
- sig := types.NewSignature(recv.At(0), params, result, isddd)
- t0.AddMethod(types.NewFunc(pos, parent, name, sig))
- }
-
- return tname
-
- case arrayTag:
- t := new(types.Array)
- if p.trackAllTypes {
- p.record(t)
- }
-
- n := p.int64()
- *t = *types.NewArray(p.typ(parent, nil), n)
- return t
-
- case sliceTag:
- t := new(types.Slice)
- if p.trackAllTypes {
- p.record(t)
- }
-
- *t = *types.NewSlice(p.typ(parent, nil))
- return t
-
- case dddTag:
- t := new(dddSlice)
- if p.trackAllTypes {
- p.record(t)
- }
-
- t.elem = p.typ(parent, nil)
- return t
-
- case structTag:
- t := new(types.Struct)
- if p.trackAllTypes {
- p.record(t)
- }
-
- *t = *types.NewStruct(p.fieldList(parent))
- return t
-
- case pointerTag:
- t := new(types.Pointer)
- if p.trackAllTypes {
- p.record(t)
- }
-
- *t = *types.NewPointer(p.typ(parent, nil))
- return t
-
- case signatureTag:
- t := new(types.Signature)
- if p.trackAllTypes {
- p.record(t)
- }
-
- params, isddd := p.paramList()
- result, _ := p.paramList()
- *t = *types.NewSignature(nil, params, result, isddd)
- return t
-
- case interfaceTag:
- // Create a dummy entry in the type list. This is safe because we
- // cannot expect the interface type to appear in a cycle, as any
- // such cycle must contain a named type which would have been
- // first defined earlier.
- // TODO(gri) Is this still true now that we have type aliases?
- // See issue #23225.
- n := len(p.typList)
- if p.trackAllTypes {
- p.record(nil)
- }
-
- var embeddeds []types.Type
- for n := p.int(); n > 0; n-- {
- p.pos()
- embeddeds = append(embeddeds, p.typ(parent, nil))
- }
-
- t := newInterface(p.methodList(parent, tname), embeddeds)
- p.interfaceList = append(p.interfaceList, t)
- if p.trackAllTypes {
- p.typList[n] = t
- }
- return t
-
- case mapTag:
- t := new(types.Map)
- if p.trackAllTypes {
- p.record(t)
- }
-
- key := p.typ(parent, nil)
- val := p.typ(parent, nil)
- *t = *types.NewMap(key, val)
- return t
-
- case chanTag:
- t := new(types.Chan)
- if p.trackAllTypes {
- p.record(t)
- }
-
- dir := chanDir(p.int())
- val := p.typ(parent, nil)
- *t = *types.NewChan(dir, val)
- return t
-
- default:
- errorf("unexpected type tag %d", i) // panics
- panic("unreachable")
- }
-}
-
-func chanDir(d int) types.ChanDir {
- // tag values must match the constants in cmd/compile/internal/gc/go.go
- switch d {
- case 1 /* Crecv */ :
- return types.RecvOnly
- case 2 /* Csend */ :
- return types.SendOnly
- case 3 /* Cboth */ :
- return types.SendRecv
- default:
- errorf("unexpected channel dir %d", d)
- return 0
- }
-}
-
-func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) {
- if n := p.int(); n > 0 {
- fields = make([]*types.Var, n)
- tags = make([]string, n)
- for i := range fields {
- fields[i], tags[i] = p.field(parent)
- }
- }
- return
-}
-
-func (p *importer) field(parent *types.Package) (*types.Var, string) {
- pos := p.pos()
- pkg, name, alias := p.fieldName(parent)
- typ := p.typ(parent, nil)
- tag := p.string()
-
- anonymous := false
- if name == "" {
- // anonymous field - typ must be T or *T and T must be a type name
- switch typ := deref(typ).(type) {
- case *types.Basic: // basic types are named types
- pkg = nil // objects defined in Universe scope have no package
- name = typ.Name()
- case *types.Named:
- name = typ.Obj().Name()
- default:
- errorf("named base type expected")
- }
- anonymous = true
- } else if alias {
- // anonymous field: we have an explicit name because it's an alias
- anonymous = true
- }
-
- return types.NewField(pos, pkg, name, typ, anonymous), tag
-}
-
-func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) {
- if n := p.int(); n > 0 {
- methods = make([]*types.Func, n)
- for i := range methods {
- methods[i] = p.method(parent, baseType)
- }
- }
- return
-}
-
-func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func {
- pos := p.pos()
- pkg, name, _ := p.fieldName(parent)
- // If we don't have a baseType, use a nil receiver.
- // A receiver using the actual interface type (which
- // we don't know yet) will be filled in when we call
- // types.Interface.Complete.
- var recv *types.Var
- if baseType != nil {
- recv = types.NewVar(token.NoPos, parent, "", baseType)
- }
- params, isddd := p.paramList()
- result, _ := p.paramList()
- sig := types.NewSignature(recv, params, result, isddd)
- return types.NewFunc(pos, pkg, name, sig)
-}
-
-func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) {
- name = p.string()
- pkg = parent
- if pkg == nil {
- // use the imported package instead
- pkg = p.pkgList[0]
- }
- if p.version == 0 && name == "_" {
- // version 0 didn't export a package for _ fields
- return
- }
- switch name {
- case "":
- // 1) field name matches base type name and is exported: nothing to do
- case "?":
- // 2) field name matches base type name and is not exported: need package
- name = ""
- pkg = p.pkg()
- case "@":
- // 3) field name doesn't match type name (alias)
- name = p.string()
- alias = true
- fallthrough
- default:
- if !exported(name) {
- pkg = p.pkg()
- }
- }
- return
-}
-
-func (p *importer) paramList() (*types.Tuple, bool) {
- n := p.int()
- if n == 0 {
- return nil, false
- }
- // negative length indicates unnamed parameters
- named := true
- if n < 0 {
- n = -n
- named = false
- }
- // n > 0
- params := make([]*types.Var, n)
- isddd := false
- for i := range params {
- params[i], isddd = p.param(named)
- }
- return types.NewTuple(params...), isddd
-}
-
-func (p *importer) param(named bool) (*types.Var, bool) {
- t := p.typ(nil, nil)
- td, isddd := t.(*dddSlice)
- if isddd {
- t = types.NewSlice(td.elem)
- }
-
- var pkg *types.Package
- var name string
- if named {
- name = p.string()
- if name == "" {
- errorf("expected named parameter")
- }
- if name != "_" {
- pkg = p.pkg()
- }
- if i := strings.Index(name, "·"); i > 0 {
- name = name[:i] // cut off gc-specific parameter numbering
- }
- }
-
- // read and discard compiler-specific info
- p.string()
-
- return types.NewVar(token.NoPos, pkg, name, t), isddd
-}
-
-func exported(name string) bool {
- ch, _ := utf8.DecodeRuneInString(name)
- return unicode.IsUpper(ch)
-}
-
-func (p *importer) value() constant.Value {
- switch tag := p.tagOrIndex(); tag {
- case falseTag:
- return constant.MakeBool(false)
- case trueTag:
- return constant.MakeBool(true)
- case int64Tag:
- return constant.MakeInt64(p.int64())
- case floatTag:
- return p.float()
- case complexTag:
- re := p.float()
- im := p.float()
- return constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
- case stringTag:
- return constant.MakeString(p.string())
- case unknownTag:
- return constant.MakeUnknown()
- default:
- errorf("unexpected value tag %d", tag) // panics
- panic("unreachable")
- }
-}
-
-func (p *importer) float() constant.Value {
- sign := p.int()
- if sign == 0 {
- return constant.MakeInt64(0)
- }
-
- exp := p.int()
- mant := []byte(p.string()) // big endian
-
- // remove leading 0's if any
- for len(mant) > 0 && mant[0] == 0 {
- mant = mant[1:]
- }
-
- // convert to little endian
- // TODO(gri) go/constant should have a more direct conversion function
- // (e.g., once it supports a big.Float based implementation)
- for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 {
- mant[i], mant[j] = mant[j], mant[i]
- }
-
- // adjust exponent (constant.MakeFromBytes creates an integer value,
- // but mant represents the mantissa bits such that 0.5 <= mant < 1.0)
- exp -= len(mant) << 3
- if len(mant) > 0 {
- for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 {
- exp++
- }
- }
-
- x := constant.MakeFromBytes(mant)
- switch {
- case exp < 0:
- d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
- x = constant.BinaryOp(x, token.QUO, d)
- case exp > 0:
- x = constant.Shift(x, token.SHL, uint(exp))
- }
-
- if sign < 0 {
- x = constant.UnaryOp(token.SUB, x, 0)
- }
- return x
-}
-
-// ----------------------------------------------------------------------------
-// Low-level decoders
-
-func (p *importer) tagOrIndex() int {
- if p.debugFormat {
- p.marker('t')
- }
-
- return int(p.rawInt64())
-}
-
-func (p *importer) int() int {
- x := p.int64()
- if int64(int(x)) != x {
- errorf("exported integer too large")
- }
- return int(x)
-}
-
-func (p *importer) int64() int64 {
- if p.debugFormat {
- p.marker('i')
- }
-
- return p.rawInt64()
-}
-
-func (p *importer) path() string {
- if p.debugFormat {
- p.marker('p')
- }
- // if the path was seen before, i is its index (>= 0)
- // (the empty string is at index 0)
- i := p.rawInt64()
- if i >= 0 {
- return p.pathList[i]
- }
- // otherwise, i is the negative path length (< 0)
- a := make([]string, -i)
- for n := range a {
- a[n] = p.string()
- }
- s := strings.Join(a, "/")
- p.pathList = append(p.pathList, s)
- return s
-}
-
-func (p *importer) string() string {
- if p.debugFormat {
- p.marker('s')
- }
- // if the string was seen before, i is its index (>= 0)
- // (the empty string is at index 0)
- i := p.rawInt64()
- if i >= 0 {
- return p.strList[i]
- }
- // otherwise, i is the negative string length (< 0)
- if n := int(-i); n <= cap(p.buf) {
- p.buf = p.buf[:n]
- } else {
- p.buf = make([]byte, n)
- }
- for i := range p.buf {
- p.buf[i] = p.rawByte()
- }
- s := string(p.buf)
- p.strList = append(p.strList, s)
- return s
-}
-
-func (p *importer) marker(want byte) {
- if got := p.rawByte(); got != want {
- errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
- }
-
- pos := p.read
- if n := int(p.rawInt64()); n != pos {
- errorf("incorrect position: got %d; want %d", n, pos)
- }
-}
-
-// rawInt64 should only be used by low-level decoders.
-func (p *importer) rawInt64() int64 {
- i, err := binary.ReadVarint(p)
- if err != nil {
- errorf("read error: %v", err)
- }
- return i
-}
-
-// rawStringln should only be used to read the initial version string.
-func (p *importer) rawStringln(b byte) string {
- p.buf = p.buf[:0]
- for b != '\n' {
- p.buf = append(p.buf, b)
- b = p.rawByte()
- }
- return string(p.buf)
-}
-
-// needed for binary.ReadVarint in rawInt64
-func (p *importer) ReadByte() (byte, error) {
- return p.rawByte(), nil
-}
-
-// byte is the bottleneck interface for reading p.data.
-// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
-// rawByte should only be used by low-level decoders.
-func (p *importer) rawByte() byte {
- b := p.data[0]
- r := 1
- if b == '|' {
- b = p.data[1]
- r = 2
- switch b {
- case 'S':
- b = '$'
- case '|':
- // nothing to do
- default:
- errorf("unexpected escape sequence in export data")
- }
- }
- p.data = p.data[r:]
- p.read += r
- return b
-
-}
-
-// ----------------------------------------------------------------------------
-// Export format
-
-// Tags. Must be < 0.
-const (
- // Objects
- packageTag = -(iota + 1)
- constTag
- typeTag
- varTag
- funcTag
- endTag
-
- // Types
- namedTag
- arrayTag
- sliceTag
- dddTag
- structTag
- pointerTag
- signatureTag
- interfaceTag
- mapTag
- chanTag
-
- // Values
- falseTag
- trueTag
- int64Tag
- floatTag
- fractionTag // not used by gc
- complexTag
- stringTag
- nilTag // only used by gc (appears in exported inlined function bodies)
- unknownTag // not used by gc (only appears in packages with errors)
-
- // Type aliases
- aliasTag
-)
-
-var predeclOnce sync.Once
-var predecl []types.Type // initialized lazily
-
-func predeclared() []types.Type {
- predeclOnce.Do(func() {
- // initialize lazily to be sure that all
- // elements have been initialized before
- predecl = []types.Type{ // basic types
- types.Typ[types.Bool],
- types.Typ[types.Int],
- types.Typ[types.Int8],
- types.Typ[types.Int16],
- types.Typ[types.Int32],
- types.Typ[types.Int64],
- types.Typ[types.Uint],
- types.Typ[types.Uint8],
- types.Typ[types.Uint16],
- types.Typ[types.Uint32],
- types.Typ[types.Uint64],
- types.Typ[types.Uintptr],
- types.Typ[types.Float32],
- types.Typ[types.Float64],
- types.Typ[types.Complex64],
- types.Typ[types.Complex128],
- types.Typ[types.String],
-
- // basic type aliases
- types.Universe.Lookup("byte").Type(),
- types.Universe.Lookup("rune").Type(),
-
- // error
- types.Universe.Lookup("error").Type(),
-
- // untyped types
- types.Typ[types.UntypedBool],
- types.Typ[types.UntypedInt],
- types.Typ[types.UntypedRune],
- types.Typ[types.UntypedFloat],
- types.Typ[types.UntypedComplex],
- types.Typ[types.UntypedString],
- types.Typ[types.UntypedNil],
-
- // package unsafe
- types.Typ[types.UnsafePointer],
-
- // invalid type
- types.Typ[types.Invalid], // only appears in packages with errors
-
- // used internally by gc; never used by this package or in .a files
- anyType{},
- }
- })
- return predecl
-}
-
-type anyType struct{}
-
-func (t anyType) Underlying() types.Type { return t }
-func (t anyType) String() string { return "any" }
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
index 9cf186605..6a9265ea9 100644
--- a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
@@ -204,14 +204,11 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
// Or, define a new standard go/types/gcexportdata package.
fset := token.NewFileSet()
- // The indexed export format starts with an 'i'; the older
- // binary export format starts with a 'c', 'd', or 'v'
- // (from "version"). Select appropriate importer.
- if len(data) > 0 && data[0] == 'i' {
- _, pkg, err = IImportData(fset, packages, data[1:], id)
- } else {
- _, pkg, err = BImportData(fset, packages, data, id)
+ // The indexed export format starts with an 'i'.
+ if len(data) == 0 || data[0] != 'i' {
+ return nil, fmt.Errorf("unknown export data format")
}
+ _, pkg, err = IImportData(fset, packages, data[1:], id)
default:
err = fmt.Errorf("unknown export data header: %q", hdr)
@@ -344,7 +341,7 @@ func (p *parser) expectKeyword(keyword string) {
// PackageId = string_lit .
//
-func (p *parser) parsePackageId() string {
+func (p *parser) parsePackageID() string {
id, err := strconv.Unquote(p.expect(scanner.String))
if err != nil {
p.error(err)
@@ -384,7 +381,7 @@ func (p *parser) parseDotIdent() string {
//
func (p *parser) parseQualifiedName() (id, name string) {
p.expect('@')
- id = p.parsePackageId()
+ id = p.parsePackageID()
p.expect('.')
// Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields.
if p.tok == '?' {
@@ -696,7 +693,7 @@ func (p *parser) parseInterfaceType(parent *types.Package) types.Type {
// Complete requires the type's embedded interfaces to be fully defined,
// but we do not define any
- return types.NewInterface(methods, nil).Complete()
+ return newInterface(methods, nil).Complete()
}
// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
@@ -785,7 +782,7 @@ func (p *parser) parseType(parent *types.Package) types.Type {
func (p *parser) parseImportDecl() {
p.expectKeyword("import")
name := p.parsePackageName()
- p.getPkg(p.parsePackageId(), name)
+ p.getPkg(p.parsePackageID(), name)
}
// int_lit = [ "+" | "-" ] { "0" ... "9" } .
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
index 4be32a2e5..858eb9f45 100644
--- a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
@@ -11,6 +11,7 @@ package gcimporter
import (
"bytes"
"encoding/binary"
+ "fmt"
"go/ast"
"go/constant"
"go/token"
@@ -25,6 +26,15 @@ import (
// 0: Go1.11 encoding
const iexportVersion = 0
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+func internalErrorf(format string, args ...interface{}) error {
+ return internalError(fmt.Sprintf(format, args...))
+}
+
// IExportData returns the binary export data for pkg.
//
// If no file set is provided, position info will be missing.
@@ -528,6 +538,16 @@ func constantToFloat(x constant.Value) *big.Float {
return &f
}
+func valueToRat(x constant.Value) *big.Rat {
+ // Convert little-endian to big-endian.
+ // I can't believe this is necessary.
+ bytes := constant.Bytes(x)
+ for i := 0; i < len(bytes)/2; i++ {
+ bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+ }
+ return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
+
// mpint exports a multi-precision integer.
//
// For unsigned types, small values are written out as a single
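
The valueToRat helper added above reverses go/constant's little-endian byte order before handing the digits to math/big, which expects big-endian. A self-contained sketch of the same conversion; the function name and sample value here are illustrative, not the vendored API.

package main

import (
	"fmt"
	"go/constant"
	"math/big"
)

// ratFromConstant mirrors valueToRat: constant.Bytes is little-endian,
// big.Int.SetBytes is big-endian, so the slice is reversed in place first.
func ratFromConstant(x constant.Value) *big.Rat {
	b := constant.Bytes(x) // little-endian digits of an Int value
	for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
		b[i], b[j] = b[j], b[i]
	}
	return new(big.Rat).SetInt(new(big.Int).SetBytes(b))
}

func main() {
	v := constant.MakeInt64(1 << 40)
	fmt.Println(ratFromConstant(v)) // 1099511627776/1
}
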
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go
index a31a88026..fef8b3008 100644
--- a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go
@@ -18,6 +18,9 @@ import (
"go/types"
"io"
"sort"
+ "sync"
+ "unicode"
+ "unicode/utf8"
)
type intReader struct {
@@ -25,6 +28,10 @@ type intReader struct {
path string
}
+func errorf(format string, args ...interface{}) {
+ panic(fmt.Sprintf(format, args...))
+}
+
func (r *intReader) int64() int64 {
i, err := binary.ReadVarint(r.Reader)
if err != nil {
@@ -628,3 +635,166 @@ func (r *importReader) byte() byte {
}
return x
}
+
+const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
+
+// Synthesize a token.Pos
+type fakeFileSet struct {
+ fset *token.FileSet
+ files map[string]*token.File
+}
+
+func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
+ // TODO(mdempsky): Make use of column.
+
+ // Since we don't know the set of needed file positions, we
+ // reserve maxlines positions per file.
+ const maxlines = 64 * 1024
+ f := s.files[file]
+ if f == nil {
+ f = s.fset.AddFile(file, -1, maxlines)
+ s.files[file] = f
+ // Allocate the fake linebreak indices on first use.
+ // TODO(adonovan): opt: save ~512KB using a more complex scheme?
+ fakeLinesOnce.Do(func() {
+ fakeLines = make([]int, maxlines)
+ for i := range fakeLines {
+ fakeLines[i] = i
+ }
+ })
+ f.SetLines(fakeLines)
+ }
+
+ if line > maxlines {
+ line = 1
+ }
+
+ // Treat the file as if it contained only newlines
+ // and column=1: use the line number as the offset.
+ return f.Pos(line - 1)
+}
+
+var (
+ fakeLines []int
+ fakeLinesOnce sync.Once
+)
+
+func chanDir(d int) types.ChanDir {
+ // tag values must match the constants in cmd/compile/internal/gc/go.go
+ switch d {
+ case 1 /* Crecv */ :
+ return types.RecvOnly
+ case 2 /* Csend */ :
+ return types.SendOnly
+ case 3 /* Cboth */ :
+ return types.SendRecv
+ default:
+ errorf("unexpected channel dir %d", d)
+ return 0
+ }
+}
+
+func exported(name string) bool {
+ ch, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(ch)
+}
+
+// ----------------------------------------------------------------------------
+// Export format
+
+// Tags. Must be < 0.
+const (
+ // Objects
+ packageTag = -(iota + 1)
+ constTag
+ typeTag
+ varTag
+ funcTag
+ endTag
+
+ // Types
+ namedTag
+ arrayTag
+ sliceTag
+ dddTag
+ structTag
+ pointerTag
+ signatureTag
+ interfaceTag
+ mapTag
+ chanTag
+
+ // Values
+ falseTag
+ trueTag
+ int64Tag
+ floatTag
+ fractionTag // not used by gc
+ complexTag
+ stringTag
+ nilTag // only used by gc (appears in exported inlined function bodies)
+ unknownTag // not used by gc (only appears in packages with errors)
+
+ // Type aliases
+ aliasTag
+)
+
+var predeclOnce sync.Once
+var predecl []types.Type // initialized lazily
+
+func predeclared() []types.Type {
+ predeclOnce.Do(func() {
+ // initialize lazily to be sure that all
+ // elements have been initialized before
+ predecl = []types.Type{ // basic types
+ types.Typ[types.Bool],
+ types.Typ[types.Int],
+ types.Typ[types.Int8],
+ types.Typ[types.Int16],
+ types.Typ[types.Int32],
+ types.Typ[types.Int64],
+ types.Typ[types.Uint],
+ types.Typ[types.Uint8],
+ types.Typ[types.Uint16],
+ types.Typ[types.Uint32],
+ types.Typ[types.Uint64],
+ types.Typ[types.Uintptr],
+ types.Typ[types.Float32],
+ types.Typ[types.Float64],
+ types.Typ[types.Complex64],
+ types.Typ[types.Complex128],
+ types.Typ[types.String],
+
+ // basic type aliases
+ types.Universe.Lookup("byte").Type(),
+ types.Universe.Lookup("rune").Type(),
+
+ // error
+ types.Universe.Lookup("error").Type(),
+
+ // untyped types
+ types.Typ[types.UntypedBool],
+ types.Typ[types.UntypedInt],
+ types.Typ[types.UntypedRune],
+ types.Typ[types.UntypedFloat],
+ types.Typ[types.UntypedComplex],
+ types.Typ[types.UntypedString],
+ types.Typ[types.UntypedNil],
+
+ // package unsafe
+ types.Typ[types.UnsafePointer],
+
+ // invalid type
+ types.Typ[types.Invalid], // only appears in packages with errors
+
+ // used internally by gc; never used by this package or in .a files
+ anyType{},
+ }
+ })
+ return predecl
+}
+
+type anyType struct{}
+
+func (t anyType) Underlying() types.Type { return t }
+func (t anyType) String() string { return "any" }
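
The fakeFileSet type added in this hunk fabricates token.Pos values by pretending every imported file is a table of one-byte lines. A standalone sketch of that trick using only the public go/token API; the file name and line number are invented for the example.

package main

import (
	"fmt"
	"go/token"
)

func main() {
	const maxlines = 64 * 1024
	fset := token.NewFileSet()
	f := fset.AddFile("imported.go", -1, maxlines)

	// Pretend each line is one byte long: offset i starts line i+1.
	lines := make([]int, maxlines)
	for i := range lines {
		lines[i] = i
	}
	f.SetLines(lines)

	// A position on line 42 is then simply offset 41.
	pos := f.Pos(42 - 1)
	fmt.Println(fset.Position(pos)) // imported.go:42:1
}
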
diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
index db0c9a7ea..dc6177c12 100644
--- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
+++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
@@ -11,17 +11,15 @@ import (
"encoding/json"
"fmt"
"go/types"
- "log"
- "os"
"os/exec"
"strings"
- "time"
+
+ "golang.org/x/tools/internal/gocommand"
)
var debug = false
-// GetSizes returns the sizes used by the underlying driver with the given parameters.
-func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
+func GetSizes(ctx context.Context, buildFlags, env []string, gocmdRunner *gocommand.Runner, dir string) (types.Sizes, error) {
// TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver.
const toolPrefix = "GOPACKAGESDRIVER="
tool := ""
@@ -41,7 +39,7 @@ func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExp
}
if tool == "off" {
- return GetSizesGolist(ctx, buildFlags, env, dir, usesExportData)
+ return GetSizesGolist(ctx, buildFlags, env, gocmdRunner, dir)
}
req, err := json.Marshal(struct {
@@ -77,98 +75,43 @@ func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExp
return response.Sizes, nil
}
-func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
- args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"}
- args = append(args, buildFlags...)
- args = append(args, "--", "unsafe")
- stdout, stderr, err := invokeGo(ctx, env, dir, usesExportData, args...)
+func GetSizesGolist(ctx context.Context, buildFlags, env []string, gocmdRunner *gocommand.Runner, dir string) (types.Sizes, error) {
+ inv := gocommand.Invocation{
+ Verb: "list",
+ Args: []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"},
+ Env: env,
+ BuildFlags: buildFlags,
+ WorkingDir: dir,
+ }
+ stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
var goarch, compiler string
- if err != nil {
- if strings.Contains(err.Error(), "cannot find main module") {
+ if rawErr != nil {
+ if strings.Contains(rawErr.Error(), "cannot find main module") {
// User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc.
// TODO(matloob): Is this a problem in practice?
- envout, _, enverr := invokeGo(ctx, env, dir, usesExportData, "env", "GOARCH")
+ inv := gocommand.Invocation{
+ Verb: "env",
+ Args: []string{"GOARCH"},
+ Env: env,
+ WorkingDir: dir,
+ }
+ envout, enverr := gocmdRunner.Run(ctx, inv)
if enverr != nil {
- return nil, err
+ return nil, enverr
}
goarch = strings.TrimSpace(envout.String())
compiler = "gc"
} else {
- return nil, err
+ return nil, friendlyErr
}
} else {
fields := strings.Fields(stdout.String())
if len(fields) < 2 {
- return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\" from stdout of go command:\n%s\ndir: %s\nstdout: <<%s>>\nstderr: <<%s>>",
- cmdDebugStr(env, args...), dir, stdout.String(), stderr.String())
+ return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
+ stdout.String(), stderr.String())
}
goarch = fields[0]
compiler = fields[1]
}
return types.SizesFor(compiler, goarch), nil
}
-
-// invokeGo returns the stdout and stderr of a go command invocation.
-func invokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, *bytes.Buffer, error) {
- if debug {
- defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now())
- }
- stdout := new(bytes.Buffer)
- stderr := new(bytes.Buffer)
- cmd := exec.CommandContext(ctx, "go", args...)
- // On darwin the cwd gets resolved to the real path, which breaks anything that
- // expects the working directory to keep the original path, including the
- // go command when dealing with modules.
- // The Go stdlib has a special feature where if the cwd and the PWD are the
- // same node then it trusts the PWD, so by setting it in the env for the child
- // process we fix up all the paths returned by the go command.
- cmd.Env = append(append([]string{}, env...), "PWD="+dir)
- cmd.Dir = dir
- cmd.Stdout = stdout
- cmd.Stderr = stderr
- if err := cmd.Run(); err != nil {
- exitErr, ok := err.(*exec.ExitError)
- if !ok {
- // Catastrophic error:
- // - executable not found
- // - context cancellation
- return nil, nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
- }
-
- // Export mode entails a build.
- // If that build fails, errors appear on stderr
- // (despite the -e flag) and the Export field is blank.
- // Do not fail in that case.
- if !usesExportData {
- return nil, nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
- }
- }
-
- // As of writing, go list -export prints some non-fatal compilation
- // errors to stderr, even with -e set. We would prefer that it put
- // them in the Package.Error JSON (see https://golang.org/issue/26319).
- // In the meantime, there's nowhere good to put them, but they can
- // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
- // is set.
- if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
- fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr)
- }
-
- // debugging
- if false {
- fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout)
- }
-
- return stdout, stderr, nil
-}
-
-func cmdDebugStr(envlist []string, args ...string) string {
- env := make(map[string]string)
- for _, kv := range envlist {
- split := strings.Split(kv, "=")
- k, v := split[0], split[1]
- env[k] = v
- }
-
- return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args)
-}
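
GetSizesGolist now feeds the `{{context.GOARCH}} {{context.Compiler}}` pair straight into types.SizesFor. A small sketch of that final step, with the go list output hard-coded for illustration.

package main

import (
	"fmt"
	"go/types"
	"strings"
)

func main() {
	stdout := "amd64 gc\n" // what `go list -f '{{context.GOARCH}} {{context.Compiler}}' -- unsafe` prints
	fields := strings.Fields(stdout)
	if len(fields) < 2 {
		panic("unexpected go list output")
	}
	sizes := types.SizesFor(fields[1], fields[0]) // compiler first, then GOARCH
	if sizes == nil {
		panic("unknown compiler/GOARCH pair")
	}
	fmt.Println(sizes.Sizeof(types.Typ[types.Uintptr])) // 8 on amd64
}
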
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index a9a1ba89e..88ca6691d 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -6,26 +6,26 @@ package packages
import (
"bytes"
+ "context"
"encoding/json"
"fmt"
"go/types"
- "io/ioutil"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
- "regexp"
+ "sort"
"strconv"
"strings"
"sync"
- "time"
"unicode"
"golang.org/x/tools/go/internal/packagesdriver"
- "golang.org/x/tools/internal/gopathwalk"
- "golang.org/x/tools/internal/semver"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/packagesinternal"
+ "golang.org/x/xerrors"
)
// debug controls verbose logging.
@@ -44,16 +44,21 @@ type responseDeduper struct {
dr *driverResponse
}
-// init fills in r with a driverResponse.
-func (r *responseDeduper) init(dr *driverResponse) {
- r.dr = dr
- r.seenRoots = map[string]bool{}
- r.seenPackages = map[string]*Package{}
+func newDeduper() *responseDeduper {
+ return &responseDeduper{
+ dr: &driverResponse{},
+ seenRoots: map[string]bool{},
+ seenPackages: map[string]*Package{},
+ }
+}
+
+// addAll fills in r with a driverResponse.
+func (r *responseDeduper) addAll(dr *driverResponse) {
for _, pkg := range dr.Packages {
- r.seenPackages[pkg.ID] = pkg
+ r.addPackage(pkg)
}
for _, root := range dr.Roots {
- r.seenRoots[root] = true
+ r.addRoot(root)
}
}
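
newDeduper and addAll replace the old init method so that results from several go list invocations can be merged without duplicates. A simplified, self-contained sketch of the same shape; the types below are stand-ins, not the real driverResponse/Package.

package main

import "fmt"

type pkg struct{ ID string }

type deduper struct {
	seenRoots    map[string]bool
	seenPackages map[string]*pkg
	packages     []*pkg
	roots        []string
}

func newDeduper() *deduper {
	return &deduper{seenRoots: map[string]bool{}, seenPackages: map[string]*pkg{}}
}

// addPackage keeps the first copy of each package ID and drops the rest.
func (d *deduper) addPackage(p *pkg) {
	if d.seenPackages[p.ID] != nil {
		return
	}
	d.seenPackages[p.ID] = p
	d.packages = append(d.packages, p)
}

// addRoot keeps each root ID at most once.
func (d *deduper) addRoot(id string) {
	if d.seenRoots[id] {
		return
	}
	d.seenRoots[id] = true
	d.roots = append(d.roots, id)
}

func main() {
	d := newDeduper()
	d.addPackage(&pkg{ID: "fmt"})
	d.addPackage(&pkg{ID: "fmt"}) // duplicate, ignored
	d.addRoot("fmt")
	fmt.Println(len(d.packages), len(d.roots)) // 1 1
}
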
@@ -73,25 +78,47 @@ func (r *responseDeduper) addRoot(id string) {
r.dr.Roots = append(r.dr.Roots, id)
}
-// goInfo contains global information from the go tool.
-type goInfo struct {
- rootDirs map[string]string
- env goEnv
+type golistState struct {
+ cfg *Config
+ ctx context.Context
+
+ envOnce sync.Once
+ goEnvError error
+ goEnv map[string]string
+
+ rootsOnce sync.Once
+ rootDirsError error
+ rootDirs map[string]string
+
+ // vendorDirs caches the (non)existence of vendor directories.
+ vendorDirs map[string]bool
}
-type goEnv struct {
- modulesOn bool
+// getEnv returns Go environment variables. Only specific variables are
+// populated -- computing all of them is slow.
+func (state *golistState) getEnv() (map[string]string, error) {
+ state.envOnce.Do(func() {
+ var b *bytes.Buffer
+ b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH")
+ if state.goEnvError != nil {
+ return
+ }
+
+ state.goEnv = make(map[string]string)
+ decoder := json.NewDecoder(b)
+ if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil {
+ return
+ }
+ })
+ return state.goEnv, state.goEnvError
}
-func determineEnv(cfg *Config) goEnv {
- buf, err := invokeGo(cfg, "env", "GOMOD")
+// mustGetEnv is a convenience function that can be used if getEnv has already succeeded.
+func (state *golistState) mustGetEnv() map[string]string {
+ env, err := state.getEnv()
if err != nil {
- return goEnv{}
+ panic(fmt.Sprintf("mustGetEnv: %v", err))
}
- gomod := bytes.TrimSpace(buf.Bytes())
-
- env := goEnv{}
- env.modulesOn = len(gomod) > 0
return env
}
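
getEnv above shells out to `go env -json` and decodes the result into a map. An approximate sketch that runs the command directly with os/exec instead of the internal gocommand.Runner.

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

func main() {
	// Only ask for the variables we need; computing all of `go env` is slower.
	out, err := exec.Command("go", "env", "-json", "GOMOD", "GOPATH").Output()
	if err != nil {
		panic(err)
	}
	env := make(map[string]string)
	if err := json.Unmarshal(out, &env); err != nil {
		panic(err)
	}
	fmt.Println("GOMOD:", env["GOMOD"])
	fmt.Println("GOPATH:", env["GOPATH"])
}
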
@@ -99,47 +126,38 @@ func determineEnv(cfg *Config) goEnv {
// the build system package structure.
// See driver for more details.
func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
- var sizes types.Sizes
+ // Make sure that any asynchronous go commands are killed when we return.
+ parentCtx := cfg.Context
+ if parentCtx == nil {
+ parentCtx = context.Background()
+ }
+ ctx, cancel := context.WithCancel(parentCtx)
+ defer cancel()
+
+ response := newDeduper()
+
+ // Fill in response.Sizes asynchronously if necessary.
var sizeserr error
var sizeswg sync.WaitGroup
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
sizeswg.Add(1)
go func() {
- sizes, sizeserr = getSizes(cfg)
+ var sizes types.Sizes
+ sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, cfg.BuildFlags, cfg.Env, cfg.gocmdRunner, cfg.Dir)
+ // types.SizesFor always returns nil or a *types.StdSizes.
+ response.dr.Sizes, _ = sizes.(*types.StdSizes)
sizeswg.Done()
}()
}
- defer sizeswg.Wait()
-
- // start fetching rootDirs
- var info goInfo
- var rootDirsReady, envReady = make(chan struct{}), make(chan struct{})
- go func() {
- info.rootDirs = determineRootDirs(cfg)
- close(rootDirsReady)
- }()
- go func() {
- info.env = determineEnv(cfg)
- close(envReady)
- }()
- getGoInfo := func() *goInfo {
- <-rootDirsReady
- <-envReady
- return &info
- }
-
- // Ensure that we don't leak goroutines: Load is synchronous, so callers will
- // not expect it to access the fields of cfg after the call returns.
- defer getGoInfo()
- // always pass getGoInfo to golistDriver
- golistDriver := func(cfg *Config, patterns ...string) (*driverResponse, error) {
- return golistDriver(cfg, getGoInfo, patterns...)
+ state := &golistState{
+ cfg: cfg,
+ ctx: ctx,
+ vendorDirs: map[string]bool{},
}
// Determine files requested in contains patterns
var containFiles []string
- var packagesNamed []string
restPatterns := make([]string, 0, len(patterns))
// Extract file= and other [querytype]= patterns. Report an error if querytype
// doesn't exist.
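
The rewritten goListDriver computes type sizes on a goroutine, cancels any outstanding go commands through the derived context, and only joins the goroutine once the sizes are actually needed. A stripped-down sketch of that concurrency shape; fetchSizes is a placeholder, not part of the package.

package main

import (
	"context"
	"fmt"
	"sync"
)

func fetchSizes(ctx context.Context) (string, error) {
	_ = ctx // a real implementation would pass ctx to the go command
	return "amd64 gc", nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // stop any still-running subprocesses on return

	var (
		sizesErr error
		sizeswg  sync.WaitGroup
		result   string
	)
	sizeswg.Add(1)
	go func() {
		defer sizeswg.Done()
		result, sizesErr = fetchSizes(ctx)
	}()

	// ... the main go list work would happen here, concurrently ...

	sizeswg.Wait()
	if sizesErr != nil {
		panic(sizesErr)
	}
	fmt.Println("sizes:", result)
}
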
@@ -155,8 +173,6 @@ extractQueries:
containFiles = append(containFiles, value)
case "pattern":
restPatterns = append(restPatterns, value)
- case "iamashamedtousethedisabledqueryname":
- packagesNamed = append(packagesNamed, value)
case "": // not a reserved query
restPatterns = append(restPatterns, pattern)
default:
@@ -172,52 +188,34 @@ extractQueries:
}
}
- response := &responseDeduper{}
- var err error
-
// See if we have any patterns to pass through to go list. Zero initial
// patterns also requires a go list call, since it's the equivalent of
// ".".
if len(restPatterns) > 0 || len(patterns) == 0 {
- dr, err := golistDriver(cfg, restPatterns...)
+ dr, err := state.createDriverResponse(restPatterns...)
if err != nil {
return nil, err
}
- response.init(dr)
- } else {
- response.init(&driverResponse{})
- }
-
- sizeswg.Wait()
- if sizeserr != nil {
- return nil, sizeserr
+ response.addAll(dr)
}
- // types.SizesFor always returns nil or a *types.StdSizes
- response.dr.Sizes, _ = sizes.(*types.StdSizes)
-
- var containsCandidates []string
if len(containFiles) != 0 {
- if err := runContainsQueries(cfg, golistDriver, response, containFiles, getGoInfo); err != nil {
+ if err := state.runContainsQueries(response, containFiles); err != nil {
return nil, err
}
}
- if len(packagesNamed) != 0 {
- if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil {
- return nil, err
- }
- }
-
- modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo)
+ modifiedPkgs, needPkgs, err := state.processGolistOverlay(response)
if err != nil {
return nil, err
}
+
+ var containsCandidates []string
if len(containFiles) > 0 {
containsCandidates = append(containsCandidates, modifiedPkgs...)
containsCandidates = append(containsCandidates, needPkgs...)
}
- if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs, getGoInfo); err != nil {
+ if err := state.addNeededOverlayPackages(response, needPkgs); err != nil {
return nil, err
}
// Check candidate packages for containFiles.
@@ -246,28 +244,32 @@ extractQueries:
}
}
+ sizeswg.Wait()
+ if sizeserr != nil {
+ return nil, sizeserr
+ }
return response.dr, nil
}
-func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string, getGoInfo func() *goInfo) error {
+func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error {
if len(pkgs) == 0 {
return nil
}
- dr, err := driver(cfg, pkgs...)
+ dr, err := state.createDriverResponse(pkgs...)
if err != nil {
return err
}
for _, pkg := range dr.Packages {
response.addPackage(pkg)
}
- _, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo)
+ _, needPkgs, err := state.processGolistOverlay(response)
if err != nil {
return err
}
- return addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo)
+ return state.addNeededOverlayPackages(response, needPkgs)
}
-func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, goInfo func() *goInfo) error {
+func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error {
for _, query := range queries {
// TODO(matloob): Do only one query per directory.
fdir := filepath.Dir(query)
@@ -277,44 +279,17 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q
if err != nil {
return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
}
- dirResponse, err := driver(cfg, pattern)
- if err != nil || (len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1) {
- // There was an error loading the package. Try to load the file as an ad-hoc package.
- // Usually the error will appear in a returned package, but may not if we're in modules mode
- // and the ad-hoc is located outside a module.
+ dirResponse, err := state.createDriverResponse(pattern)
+
+ // If there was an error loading the package, or the package is returned
+ // with errors, try to load the file as an ad-hoc package.
+ // Usually the error will appear in a returned package, but may not if we're
+ // in module mode and the ad-hoc is located outside a module.
+ if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 &&
+ len(dirResponse.Packages[0].Errors) == 1 {
var queryErr error
- dirResponse, queryErr = driver(cfg, query)
- if queryErr != nil {
- // Return the original error if the attempt to fall back failed.
- return err
- }
- // If we get nothing back from `go list`, try to make this file into its own ad-hoc package.
- if len(dirResponse.Packages) == 0 && queryErr == nil {
- dirResponse.Packages = append(dirResponse.Packages, &Package{
- ID: "command-line-arguments",
- PkgPath: query,
- GoFiles: []string{query},
- CompiledGoFiles: []string{query},
- Imports: make(map[string]*Package),
- })
- dirResponse.Roots = append(dirResponse.Roots, "command-line-arguments")
- }
- // Special case to handle issue #33482:
- // If this is a file= query for ad-hoc packages where the file only exists on an overlay,
- // and exists outside of a module, add the file in for the package.
- if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" ||
- filepath.ToSlash(dirResponse.Packages[0].PkgPath) == filepath.ToSlash(query)) {
- if len(dirResponse.Packages[0].GoFiles) == 0 {
- filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
- // TODO(matloob): check if the file is outside of a root dir?
- for path := range cfg.Overlay {
- if path == filename {
- dirResponse.Packages[0].Errors = nil
- dirResponse.Packages[0].GoFiles = []string{path}
- dirResponse.Packages[0].CompiledGoFiles = []string{path}
- }
- }
- }
+ if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil {
+ return err // return the original error
}
}
isRoot := make(map[string]bool, len(dirResponse.Roots))
@@ -342,276 +317,47 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q
return nil
}
-// modCacheRegexp splits a path in a module cache into module, module version, and package.
-var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
-
-func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
- // calling `go env` isn't free; bail out if there's nothing to do.
- if len(queries) == 0 {
- return nil
- }
- // Determine which directories are relevant to scan.
- roots, modRoot, err := roots(cfg)
- if err != nil {
- return err
- }
-
- // Scan the selected directories. Simple matches, from GOPATH/GOROOT
- // or the local module, can simply be "go list"ed. Matches from the
- // module cache need special treatment.
- var matchesMu sync.Mutex
- var simpleMatches, modCacheMatches []string
- add := func(root gopathwalk.Root, dir string) {
- // Walk calls this concurrently; protect the result slices.
- matchesMu.Lock()
- defer matchesMu.Unlock()
-
- path := dir
- if dir != root.Path {
- path = dir[len(root.Path)+1:]
- }
- if pathMatchesQueries(path, queries) {
- switch root.Type {
- case gopathwalk.RootModuleCache:
- modCacheMatches = append(modCacheMatches, path)
- case gopathwalk.RootCurrentModule:
- // We'd need to read go.mod to find the full
- // import path. Relative's easier.
- rel, err := filepath.Rel(cfg.Dir, dir)
- if err != nil {
- // This ought to be impossible, since
- // we found dir in the current module.
- panic(err)
- }
- simpleMatches = append(simpleMatches, "./"+rel)
- case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT:
- simpleMatches = append(simpleMatches, path)
- }
- }
- }
-
- startWalk := time.Now()
- gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug})
- cfg.Logf("%v for walk", time.Since(startWalk))
-
- // Weird special case: the top-level package in a module will be in
- // whatever directory the user checked the repository out into. It's
- // more reasonable for that to not match the package name. So, if there
- // are any Go files in the mod root, query it just to be safe.
- if modRoot != "" {
- rel, err := filepath.Rel(cfg.Dir, modRoot)
- if err != nil {
- panic(err) // See above.
- }
-
- files, err := ioutil.ReadDir(modRoot)
- if err != nil {
- panic(err) // See above.
- }
-
- for _, f := range files {
- if strings.HasSuffix(f.Name(), ".go") {
- simpleMatches = append(simpleMatches, rel)
- break
- }
- }
- }
-
- addResponse := func(r *driverResponse) {
- for _, pkg := range r.Packages {
- response.addPackage(pkg)
- for _, name := range queries {
- if pkg.Name == name {
- response.addRoot(pkg.ID)
- break
- }
- }
- }
- }
-
- if len(simpleMatches) != 0 {
- resp, err := driver(cfg, simpleMatches...)
- if err != nil {
- return err
- }
- addResponse(resp)
- }
-
- // Module cache matches are tricky. We want to avoid downloading new
- // versions of things, so we need to use the ones present in the cache.
- // go list doesn't accept version specifiers, so we have to write out a
- // temporary module, and do the list in that module.
- if len(modCacheMatches) != 0 {
- // Collect all the matches, deduplicating by major version
- // and preferring the newest.
- type modInfo struct {
- mod string
- major string
- }
- mods := make(map[modInfo]string)
- var imports []string
- for _, modPath := range modCacheMatches {
- matches := modCacheRegexp.FindStringSubmatch(modPath)
- mod, ver := filepath.ToSlash(matches[1]), matches[2]
- importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3]))
-
- major := semver.Major(ver)
- if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 {
- mods[modInfo{mod, major}] = ver
- }
-
- imports = append(imports, importPath)
- }
-
- // Build the temporary module.
- var gomod bytes.Buffer
- gomod.WriteString("module modquery\nrequire (\n")
- for mod, version := range mods {
- gomod.WriteString("\t" + mod.mod + " " + version + "\n")
- }
- gomod.WriteString(")\n")
-
- tmpCfg := *cfg
-
- // We're only trying to look at stuff in the module cache, so
- // disable the network. This should speed things up, and has
- // prevented errors in at least one case, #28518.
- tmpCfg.Env = append([]string{"GOPROXY=off"}, cfg.Env...)
-
- var err error
- tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery")
- if err != nil {
- return err
- }
- defer os.RemoveAll(tmpCfg.Dir)
-
- if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil {
- return fmt.Errorf("writing go.mod for module cache query: %v", err)
- }
-
- // Run the query, using the import paths calculated from the matches above.
- resp, err := driver(&tmpCfg, imports...)
- if err != nil {
- return fmt.Errorf("querying module cache matches: %v", err)
- }
- addResponse(resp)
- }
-
- return nil
-}
-
-func getSizes(cfg *Config) (types.Sizes, error) {
- return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg))
-}
-
-// roots selects the appropriate paths to walk based on the passed-in configuration,
-// particularly the environment and the presence of a go.mod in cfg.Dir's parents.
-func roots(cfg *Config) ([]gopathwalk.Root, string, error) {
- stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD")
+// adhocPackage attempts to load or construct an ad-hoc package for a given
+// query, if the original call to the driver produced inadequate results.
+func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) {
+ response, err := state.createDriverResponse(query)
if err != nil {
- return nil, "", err
- }
-
- fields := strings.Split(stdout.String(), "\n")
- if len(fields) != 4 || len(fields[3]) != 0 {
- return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String())
- }
- goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2]
- var modDir string
- if gomod != "" {
- modDir = filepath.Dir(gomod)
+ return nil, err
}
-
- var roots []gopathwalk.Root
- // Always add GOROOT.
- roots = append(roots, gopathwalk.Root{
- Path: filepath.Join(goroot, "/src"),
- Type: gopathwalk.RootGOROOT,
- })
- // If modules are enabled, scan the module dir.
- if modDir != "" {
- roots = append(roots, gopathwalk.Root{
- Path: modDir,
- Type: gopathwalk.RootCurrentModule,
+ // If we get nothing back from `go list`,
+ // try to make this file into its own ad-hoc package.
+ // TODO(rstambler): Should this check against the original response?
+ if len(response.Packages) == 0 {
+ response.Packages = append(response.Packages, &Package{
+ ID: "command-line-arguments",
+ PkgPath: query,
+ GoFiles: []string{query},
+ CompiledGoFiles: []string{query},
+ Imports: make(map[string]*Package),
})
- }
- // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode.
- for _, p := range gopath {
- if modDir != "" {
- roots = append(roots, gopathwalk.Root{
- Path: filepath.Join(p, "/pkg/mod"),
- Type: gopathwalk.RootModuleCache,
- })
- } else {
- roots = append(roots, gopathwalk.Root{
- Path: filepath.Join(p, "/src"),
- Type: gopathwalk.RootGOPATH,
- })
- }
- }
-
- return roots, modDir, nil
-}
-
-// These functions were copied from goimports. See further documentation there.
-
-// pathMatchesQueries is adapted from pkgIsCandidate.
-// TODO: is it reasonable to do Contains here, rather than an exact match on a path component?
-func pathMatchesQueries(path string, queries []string) bool {
- lastTwo := lastTwoComponents(path)
- for _, query := range queries {
- if strings.Contains(lastTwo, query) {
- return true
- }
- if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) {
- lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
- if strings.Contains(lastTwo, query) {
- return true
- }
- }
- }
- return false
-}
-
-// lastTwoComponents returns at most the last two path components
-// of v, using either / or \ as the path separator.
-func lastTwoComponents(v string) string {
- nslash := 0
- for i := len(v) - 1; i >= 0; i-- {
- if v[i] == '/' || v[i] == '\\' {
- nslash++
- if nslash == 2 {
- return v[i:]
+ response.Roots = append(response.Roots, "command-line-arguments")
+ }
+ // Handle special cases.
+ if len(response.Packages) == 1 {
+ // golang/go#33482: If this is a file= query for ad-hoc packages where
+ // the file only exists on an overlay, and exists outside of a module,
+ // add the file to the package and remove the errors.
+ if response.Packages[0].ID == "command-line-arguments" ||
+ filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) {
+ if len(response.Packages[0].GoFiles) == 0 {
+ filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
+ // TODO(matloob): check if the file is outside of a root dir?
+ for path := range state.cfg.Overlay {
+ if path == filename {
+ response.Packages[0].Errors = nil
+ response.Packages[0].GoFiles = []string{path}
+ response.Packages[0].CompiledGoFiles = []string{path}
+ }
+ }
}
}
}
- return v
-}
-
-func hasHyphenOrUpperASCII(s string) bool {
- for i := 0; i < len(s); i++ {
- b := s[i]
- if b == '-' || ('A' <= b && b <= 'Z') {
- return true
- }
- }
- return false
-}
-
-func lowerASCIIAndRemoveHyphen(s string) (ret string) {
- buf := make([]byte, 0, len(s))
- for i := 0; i < len(s); i++ {
- b := s[i]
- switch {
- case b == '-':
- continue
- case 'A' <= b && b <= 'Z':
- buf = append(buf, b+('a'-'A'))
- default:
- buf = append(buf, b)
- }
- }
- return string(buf)
+ return response, nil
}
// Fields must match go list;
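
runContainsQueries and the new adhocPackage helper back the public `file=` query. For context, this is how a caller exercises that path through the exported go/packages API; the file path is illustrative.

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
	// file= asks for whatever package contains this file; outside a module
	// it may come back as the ad-hoc "command-line-arguments" package.
	pkgs, err := packages.Load(cfg, "file=/home/me/proj/main.go")
	if err != nil {
		panic(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.ID, p.PkgPath)
	}
}
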
@@ -636,6 +382,7 @@ type jsonPackage struct {
Imports []string
ImportMap map[string]string
Deps []string
+ Module *packagesinternal.Module
TestGoFiles []string
TestImports []string
XTestGoFiles []string
@@ -656,10 +403,9 @@ func otherFiles(p *jsonPackage) [][]string {
return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
}
-// golistDriver uses the "go list" command to expand the pattern
-// words and return metadata for the specified packages. dir may be
-// "" and env may be nil, as per os/exec.Command.
-func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driverResponse, error) {
+// createDriverResponse uses the "go list" command to expand the pattern
+// words and return a response for the specified packages.
+func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) {
// go list uses the following identifiers in ImportPath and Imports:
//
// "p" -- importable package or main (command)
@@ -673,11 +419,13 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv
// Run "go list" for complete
// information on the specified packages.
- buf, err := invokeGo(cfg, "list", golistargs(cfg, words)...)
+ buf, err := state.invokeGo("list", golistargs(state.cfg, words)...)
if err != nil {
return nil, err
}
seen := make(map[string]*jsonPackage)
+ pkgs := make(map[string]*Package)
+ additionalErrors := make(map[string][]Error)
// Decode the JSON and convert it to Package form.
var response driverResponse
for dec := json.NewDecoder(buf); dec.More(); {
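
createDriverResponse consumes `go list -json` output as a stream of concatenated JSON objects, hence the json.Decoder/More loop rather than a single Unmarshal. A minimal standalone version of that loop against real go list output, with the field set trimmed down.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os/exec"
)

type listedPackage struct {
	ImportPath string
	Dir        string
	GoFiles    []string
}

func main() {
	out, err := exec.Command("go", "list", "-e", "-json", "fmt", "errors").Output()
	if err != nil {
		panic(err)
	}
	// go list prints one JSON object per package, back to back.
	for dec := json.NewDecoder(bytes.NewReader(out)); dec.More(); {
		var p listedPackage
		if err := dec.Decode(&p); err != nil {
			panic(err)
		}
		fmt.Println(p.ImportPath, len(p.GoFiles), "files")
	}
}
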
@@ -708,18 +456,81 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv
// contained in a known module or GOPATH entry. This will allow the package to be
// properly "reclaimed" when overlays are processed.
if filepath.IsAbs(p.ImportPath) && p.Error != nil {
- pkgPath, ok := getPkgPath(cfg, p.ImportPath, rootsDirs)
+ pkgPath, ok, err := state.getPkgPath(p.ImportPath)
+ if err != nil {
+ return nil, err
+ }
if ok {
p.ImportPath = pkgPath
}
}
if old, found := seen[p.ImportPath]; found {
- if !reflect.DeepEqual(p, old) {
- return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
+ // If one version of the package has an error, and the other doesn't, assume
+ // that this is a case where go list is reporting a fake dependency variant
+ // of the imported package: When a package tries to invalidly import another
+ // package, go list emits a variant of the imported package (with the same
+ // import path, but with an error on it, and the package will have a
+ // DepError set on it). An example of when this can happen is for imports of
+ // main packages: main packages can not be imported, but they may be
+ // separately matched and listed by another pattern.
+ // See golang.org/issue/36188 for more details.
+
+ // The plan is that eventually, hopefully in Go 1.15, the error will be
+ // reported on the importing package rather than the duplicate "fake"
+ // version of the imported package. Once all supported versions of Go
+ // have the new behavior this logic can be deleted.
+ // TODO(matloob): delete the workaround logic once all supported versions of
+ // Go return the errors on the proper package.
+
+ // There should be exactly one version of a package that doesn't have an
+ // error.
+ if old.Error == nil && p.Error == nil {
+ if !reflect.DeepEqual(p, old) {
+ return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
+ }
+ continue
}
- // skip the duplicate
- continue
+
+ // Determine if this package's error needs to be bubbled up.
+ // This is a hack, and we expect for go list to eventually set the error
+ // on the package.
+ if old.Error != nil {
+ var errkind string
+ if strings.Contains(old.Error.Err, "not an importable package") {
+ errkind = "not an importable package"
+ } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") {
+ errkind = "use of internal package not allowed"
+ }
+ if errkind != "" {
+ if len(old.Error.ImportStack) < 1 {
+ return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind)
+ }
+ importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1]
+ if importingPkg == old.ImportPath {
+ // Using an older version of Go which put this package itself on top of import
+ // stack, instead of the importer. Look for importer in second from top
+ // position.
+ if len(old.Error.ImportStack) < 2 {
+ return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind)
+ }
+ importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2]
+ }
+ additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{
+ Pos: old.Error.Pos,
+ Msg: old.Error.Err,
+ Kind: ListError,
+ })
+ }
+ }
+
+ // Make sure that if there's a version of the package without an error,
+ // that's the one reported to the user.
+ if old.Error == nil {
+ continue
+ }
+
+ // This package will replace the old one at the end of the loop.
}
seen[p.ImportPath] = p
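
The workaround above re-attaches a duplicate package's error to the package that performed the bad import, taken from the top of ImportStack, or one entry below it on older Go releases that put the package itself on top. A small sketch of that selection logic with invented import paths.

package main

import "fmt"

type listError struct {
	ImportStack []string
	Err         string
}

// importingPackage picks the package whose import triggered the error,
// mirroring the ImportStack handling in the hunk above.
func importingPackage(selfPath string, e listError) (string, bool) {
	if len(e.ImportStack) == 0 {
		return "", false
	}
	imp := e.ImportStack[len(e.ImportStack)-1]
	if imp == selfPath {
		// Older Go puts the faulty package itself on top; the importer
		// is then second from the top.
		if len(e.ImportStack) < 2 {
			return "", false
		}
		imp = e.ImportStack[len(e.ImportStack)-2]
	}
	return imp, true
}

func main() {
	e := listError{
		ImportStack: []string{"example.com/app", "example.com/app/internal/x"},
		Err:         "use of internal package example.com/app/internal/x not allowed",
	}
	if imp, ok := importingPackage("example.com/app/internal/x", e); ok {
		fmt.Println("attach error to", imp) // example.com/app
	}
}
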
@@ -729,6 +540,8 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
+ forTest: p.ForTest,
+ module: p.Module,
}
// Work around https://golang.org/issue/28749:
@@ -817,29 +630,37 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv
})
}
+ pkgs[pkg.ID] = pkg
+ }
+
+ for id, errs := range additionalErrors {
+ if p, ok := pkgs[id]; ok {
+ p.Errors = append(p.Errors, errs...)
+ }
+ }
+ for _, pkg := range pkgs {
response.Packages = append(response.Packages, pkg)
}
+ sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
return &response, nil
}
// getPkgPath finds the package path of a directory if it's relative to a root directory.
-func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) {
+func (state *golistState) getPkgPath(dir string) (string, bool, error) {
absDir, err := filepath.Abs(dir)
if err != nil {
- cfg.Logf("error getting absolute path of %s: %v", dir, err)
- return "", false
+ return "", false, err
}
- for rdir, rpath := range goInfo().rootDirs {
- absRdir, err := filepath.Abs(rdir)
- if err != nil {
- cfg.Logf("error getting absolute path of %s: %v", rdir, err)
- continue
- }
+ roots, err := state.determineRootDirs()
+ if err != nil {
+ return "", false, err
+ }
+
+ for rdir, rpath := range roots {
// Make sure that the directory is in the module,
// to avoid creating a path relative to another module.
- if !strings.HasPrefix(absDir, absRdir) {
- cfg.Logf("%s does not have prefix %s", absDir, absRdir)
+ if !strings.HasPrefix(absDir, rdir) {
continue
}
// TODO(matloob): This doesn't properly handle symlinks.
@@ -854,11 +675,11 @@ func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) {
// Once the file is saved, gopls, or the next invocation of the tool will get the correct
// result straight from golist.
// TODO(matloob): Implement module tiebreaking?
- return path.Join(rpath, filepath.ToSlash(r)), true
+ return path.Join(rpath, filepath.ToSlash(r)), true, nil
}
- return filepath.ToSlash(r), true
+ return filepath.ToSlash(r), true, nil
}
- return "", false
+ return "", false, nil
}
// absJoin absolutizes and flattens the lists of files.
@@ -878,7 +699,7 @@ func golistargs(cfg *Config, words []string) []string {
const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
fullargs := []string{
"-e", "-json",
- fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0),
+ fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0),
fmt.Sprintf("-test=%t", cfg.Tests),
fmt.Sprintf("-export=%t", usesExportData(cfg)),
fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0),
@@ -893,28 +714,23 @@ func golistargs(cfg *Config, words []string) []string {
}
// invokeGo returns the stdout of a go command invocation.
-func invokeGo(cfg *Config, verb string, args ...string) (*bytes.Buffer, error) {
- stdout := new(bytes.Buffer)
- stderr := new(bytes.Buffer)
- goArgs := []string{verb}
- goArgs = append(goArgs, cfg.BuildFlags...)
- goArgs = append(goArgs, args...)
- cmd := exec.CommandContext(cfg.Context, "go", goArgs...)
- // On darwin the cwd gets resolved to the real path, which breaks anything that
- // expects the working directory to keep the original path, including the
- // go command when dealing with modules.
- // The Go stdlib has a special feature where if the cwd and the PWD are the
- // same node then it trusts the PWD, so by setting it in the env for the child
- // process we fix up all the paths returned by the go command.
- cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir)
- cmd.Dir = cfg.Dir
- cmd.Stdout = stdout
- cmd.Stderr = stderr
- defer func(start time.Time) {
- cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr, stdout)
- }(time.Now())
-
- if err := cmd.Run(); err != nil {
+func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) {
+ cfg := state.cfg
+
+ inv := gocommand.Invocation{
+ Verb: verb,
+ Args: args,
+ BuildFlags: cfg.BuildFlags,
+ Env: cfg.Env,
+ Logf: cfg.Logf,
+ WorkingDir: cfg.Dir,
+ }
+ gocmdRunner := cfg.gocmdRunner
+ if gocmdRunner == nil {
+ gocmdRunner = &gocommand.Runner{}
+ }
+ stdout, stderr, _, err := gocmdRunner.RunRaw(cfg.Context, inv)
+ if err != nil {
// Check for 'go' executable not being found.
if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound)
@@ -924,7 +740,7 @@ func invokeGo(cfg *Config, verb string, args ...string) (*bytes.Buffer, error) {
if !ok {
// Catastrophic error:
// - context cancellation
- return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
+ return nil, xerrors.Errorf("couldn't run 'go': %w", err)
}
// Old go version?
@@ -951,7 +767,12 @@ func invokeGo(cfg *Config, verb string, args ...string) (*bytes.Buffer, error) {
!strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r)
}
if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") {
- if strings.HasPrefix(strings.TrimLeftFunc(stderr.String()[len("# "):], isPkgPathRune), "\n") {
+ msg := stderr.String()[len("# "):]
+ if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") {
+ return stdout, nil
+ }
+ // Treat pkg-config errors as a special case (golang.org/issue/36770).
+ if strings.HasPrefix(msg, "pkg-config") {
return stdout, nil
}
}
@@ -1040,16 +861,6 @@ func invokeGo(cfg *Config, verb string, args ...string) (*bytes.Buffer, error) {
return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
}
}
-
- // As of writing, go list -export prints some non-fatal compilation
- // errors to stderr, even with -e set. We would prefer that it put
- // them in the Package.Error JSON (see https://golang.org/issue/26319).
- // In the meantime, there's nowhere good to put them, but they can
- // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
- // is set.
- if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
- fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr)
- }
return stdout, nil
}
diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
index a7de62299..3c99b6e48 100644
--- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go
+++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
@@ -1,12 +1,14 @@
package packages
import (
- "bytes"
"encoding/json"
"fmt"
"go/parser"
"go/token"
+ "log"
+ "os"
"path/filepath"
+ "sort"
"strconv"
"strings"
)
@@ -16,15 +18,20 @@ import (
// sometimes incorrect.
// TODO(matloob): Handle unsupported cases, including the following:
// - determining the correct package to add given a new import path
-func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func() *goInfo) (modifiedPkgs, needPkgs []string, err error) {
+func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) {
havePkgs := make(map[string]string) // importPath -> non-test package ID
needPkgsSet := make(map[string]bool)
modifiedPkgsSet := make(map[string]bool)
+ pkgOfDir := make(map[string][]*Package)
for _, pkg := range response.dr.Packages {
// This is an approximation of import path to id. This can be
// wrong for tests, vendored packages, and a number of other cases.
havePkgs[pkg.PkgPath] = pkg.ID
+ x := commonDir(pkg.GoFiles)
+ if x != "" {
+ pkgOfDir[x] = append(pkgOfDir[x], pkg)
+ }
}
// If no new imports are added, it is safe to avoid loading any needPkgs.
@@ -34,7 +41,23 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
// potentially modifying the transitive set of dependencies).
var overlayAddsImports bool
- for opath, contents := range cfg.Overlay {
+ // If both a package and its test package are created by the overlay, we
+ // need the real package first. Process all non-test files before test
+ // files, and make the whole process deterministic while we're at it.
+ var overlayFiles []string
+ for opath := range state.cfg.Overlay {
+ overlayFiles = append(overlayFiles, opath)
+ }
+ sort.Slice(overlayFiles, func(i, j int) bool {
+ iTest := strings.HasSuffix(overlayFiles[i], "_test.go")
+ jTest := strings.HasSuffix(overlayFiles[j], "_test.go")
+ if iTest != jTest {
+ return !iTest // non-tests are before tests.
+ }
+ return overlayFiles[i] < overlayFiles[j]
+ })
+ for _, opath := range overlayFiles {
+ contents := state.cfg.Overlay[opath]
base := filepath.Base(opath)
dir := filepath.Dir(opath)
var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant
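
The sort added in this hunk makes overlay processing deterministic and ensures a package's real files are handled before its _test.go overlays. A quick demonstration of the ordering with made-up file names.

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	files := []string{"b_test.go", "a.go", "a_test.go", "b.go"}
	sort.Slice(files, func(i, j int) bool {
		iTest := strings.HasSuffix(files[i], "_test.go")
		jTest := strings.HasSuffix(files[j], "_test.go")
		if iTest != jTest {
			return !iTest // non-tests sort before tests
		}
		return files[i] < files[j]
	})
	fmt.Println(files) // [a.go b.go a_test.go b_test.go]
}
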
@@ -47,6 +70,9 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
// to the overlay.
continue
}
+ // if all the overlay files belong to a different package, change the package
+ // name to that package. Otherwise leave it alone; there will be an error message.
+ maybeFixPackageName(pkgName, pkgOfDir, dir)
nextPackage:
for _, p := range response.dr.Packages {
if pkgName != p.Name && p.ID != "command-line-arguments" {
@@ -64,14 +90,8 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
testVariantOf = p
continue nextPackage
}
+ // We must have already seen the package of which this is a test variant.
if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath {
- // If we've already seen the test variant,
- // make sure to label which package it is a test variant of.
- if hasTestFiles(pkg) {
- testVariantOf = p
- continue nextPackage
- }
- // If we have already seen the package of which this is a test variant.
if hasTestFiles(p) {
testVariantOf = pkg
}
@@ -86,7 +106,10 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
if pkg == nil {
// Try to find the module or gopath dir the file is contained in.
// Then for modules, add the module opath to the beginning.
- pkgPath, ok := getPkgPath(cfg, dir, rootDirs)
+ pkgPath, ok, err := state.getPkgPath(dir)
+ if err != nil {
+ return nil, nil, err
+ }
if !ok {
break
}
@@ -114,6 +137,11 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
if isTestFile && !isXTest && testVariantOf != nil {
pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...)
pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...)
+ // Add the package under test and its imports to the test variant.
+ pkg.forTest = testVariantOf.PkgPath
+ for k, v := range testVariantOf.Imports {
+ pkg.Imports[k] = &Package{ID: v.ID}
+ }
}
}
}
@@ -130,42 +158,45 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
continue
}
for _, imp := range imports {
- _, found := pkg.Imports[imp]
- if !found {
- overlayAddsImports = true
- // TODO(matloob): Handle cases when the following block isn't correct.
- // These include imports of vendored packages, etc.
- id, ok := havePkgs[imp]
- if !ok {
- id = imp
- }
- pkg.Imports[imp] = &Package{ID: id}
- // Add dependencies to the non-test variant version of this package as wel.
- if testVariantOf != nil {
- testVariantOf.Imports[imp] = &Package{ID: id}
+ if _, found := pkg.Imports[imp]; found {
+ continue
+ }
+ overlayAddsImports = true
+ id, ok := havePkgs[imp]
+ if !ok {
+ var err error
+ id, err = state.resolveImport(dir, imp)
+ if err != nil {
+ return nil, nil, err
}
}
+ pkg.Imports[imp] = &Package{ID: id}
+ // Add dependencies to the non-test variant version of this package as well.
+ if testVariantOf != nil {
+ testVariantOf.Imports[imp] = &Package{ID: id}
+ }
}
- continue
}
- // toPkgPath tries to guess the package path given the id.
- // This isn't always correct -- it's certainly wrong for
- // vendored packages' paths.
- toPkgPath := func(id string) string {
- // TODO(matloob): Handle vendor paths.
- i := strings.IndexByte(id, ' ')
- if i >= 0 {
- return id[:i]
+ // toPkgPath guesses the package path given the id.
+ toPkgPath := func(sourceDir, id string) (string, error) {
+ if i := strings.IndexByte(id, ' '); i >= 0 {
+ return state.resolveImport(sourceDir, id[:i])
}
- return id
+ return state.resolveImport(sourceDir, id)
}
- // Do another pass now that new packages have been created to determine the
- // set of missing packages.
+ // Now that new packages have been created, do another pass to determine
+ // the new set of missing packages.
for _, pkg := range response.dr.Packages {
for _, imp := range pkg.Imports {
- pkgPath := toPkgPath(imp.ID)
+ if len(pkg.GoFiles) == 0 {
+ return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath)
+ }
+ pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID)
+ if err != nil {
+ return nil, nil, err
+ }
if _, ok := havePkgs[pkgPath]; !ok {
needPkgsSet[pkgPath] = true
}
@@ -185,6 +216,52 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
return modifiedPkgs, needPkgs, err
}
+// resolveImport finds the ID of a package given its import path.
+// In particular, it will find the right vendored copy when in GOPATH mode.
+func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) {
+ env, err := state.getEnv()
+ if err != nil {
+ return "", err
+ }
+ if env["GOMOD"] != "" {
+ return importPath, nil
+ }
+
+ searchDir := sourceDir
+ for {
+ vendorDir := filepath.Join(searchDir, "vendor")
+ exists, ok := state.vendorDirs[vendorDir]
+ if !ok {
+ info, err := os.Stat(vendorDir)
+ exists = err == nil && info.IsDir()
+ state.vendorDirs[vendorDir] = exists
+ }
+
+ if exists {
+ vendoredPath := filepath.Join(vendorDir, importPath)
+ if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() {
+ // We should probably check for .go files here, but shame on anyone who fools us.
+ path, ok, err := state.getPkgPath(vendoredPath)
+ if err != nil {
+ return "", err
+ }
+ if ok {
+ return path, nil
+ }
+ }
+ }
+
+ // We know we've hit the top of the filesystem when we Dir / and get /,
+ // or C:\ and get C:\, etc.
+ next := filepath.Dir(searchDir)
+ if next == searchDir {
+ break
+ }
+ searchDir = next
+ }
+ return importPath, nil
+}
+
func hasTestFiles(p *Package) bool {
for _, f := range p.GoFiles {
if strings.HasSuffix(f, "_test.go") {
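
resolveImport walks upward from the importing file's directory looking for vendor/<importPath>, stopping when filepath.Dir no longer changes at the filesystem root; this only applies in GOPATH mode, since module mode returns the import path unchanged. A self-contained sketch of that walk with illustrative paths.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// findVendored reports the nearest enclosing vendor copy of importPath, if any.
func findVendored(sourceDir, importPath string) (string, bool) {
	for dir := sourceDir; ; {
		candidate := filepath.Join(dir, "vendor", importPath)
		if info, err := os.Stat(candidate); err == nil && info.IsDir() {
			return candidate, true
		}
		next := filepath.Dir(dir)
		if next == dir { // hit the root: Dir("/") == "/", Dir(`C:\`) == `C:\`
			return "", false
		}
		dir = next
	}
}

func main() {
	if dir, ok := findVendored("/home/me/proj/cmd/app", "github.com/example/dep"); ok {
		fmt.Println("vendored at", dir)
	} else {
		fmt.Println("not vendored")
	}
}
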
@@ -194,44 +271,69 @@ func hasTestFiles(p *Package) bool {
return false
}
-// determineRootDirs returns a mapping from directories code can be contained in to the
-// corresponding import path prefixes of those directories.
-// Its result is used to try to determine the import path for a package containing
-// an overlay file.
-func determineRootDirs(cfg *Config) map[string]string {
- // Assume modules first:
- out, err := invokeGo(cfg, "list", "-m", "-json", "all")
+// determineRootDirs returns a mapping from absolute directories that could
+// contain code to their corresponding import path prefixes.
+func (state *golistState) determineRootDirs() (map[string]string, error) {
+ env, err := state.getEnv()
if err != nil {
- return determineRootDirsGOPATH(cfg)
+ return nil, err
+ }
+ if env["GOMOD"] != "" {
+ state.rootsOnce.Do(func() {
+ state.rootDirs, state.rootDirsError = state.determineRootDirsModules()
+ })
+ } else {
+ state.rootsOnce.Do(func() {
+ state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH()
+ })
+ }
+ return state.rootDirs, state.rootDirsError
+}
+
+func (state *golistState) determineRootDirsModules() (map[string]string, error) {
+ // This will only return the root directory for the main module.
+ // For now we only support overlays in main modules.
+ // Editing files in the module cache isn't a great idea, so we don't
+ // plan to ever support that, but editing files in replaced modules
+ // is something we may want to support. To do that, we'll want to
+ // do a go list -m to determine the replaced module's module path and
+ // directory, and then a go list -m {{with .Replace}}{{.Dir}}{{end}} <replaced module's path>
+ // from the main module to determine if that module is actually a replacement.
+ // See bcmills's comment here: https://github.com/golang/go/issues/37629#issuecomment-594179751
+ // for more information.
+ out, err := state.invokeGo("list", "-m", "-json")
+ if err != nil {
+ return nil, err
}
m := map[string]string{}
type jsonMod struct{ Path, Dir string }
for dec := json.NewDecoder(out); dec.More(); {
mod := new(jsonMod)
if err := dec.Decode(mod); err != nil {
- return m // Give up and return an empty map. Package won't be found for overlay.
+ return nil, err
}
if mod.Dir != "" && mod.Path != "" {
// This is a valid module; add it to the map.
- m[mod.Dir] = mod.Path
+ absDir, err := filepath.Abs(mod.Dir)
+ if err != nil {
+ return nil, err
+ }
+ m[absDir] = mod.Path
}
}
- return m
+ return m, nil
}
-func determineRootDirsGOPATH(cfg *Config) map[string]string {
+func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) {
m := map[string]string{}
- out, err := invokeGo(cfg, "env", "GOPATH")
- if err != nil {
- // Could not determine root dir mapping. Everything is best-effort, so just return an empty map.
- // When we try to find the import path for a directory, there will be no root-dir match and
- // we'll give up.
- return m
- }
- for _, p := range filepath.SplitList(string(bytes.TrimSpace(out.Bytes()))) {
- m[filepath.Join(p, "src")] = ""
+ for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) {
+ absDir, err := filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+ m[filepath.Join(absDir, "src")] = ""
}
- return m
+ return m, nil
}
func extractImports(filename string, contents []byte) ([]string, error) {
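
determineRootDirsModules asks `go list -m -json` for the main module only and maps its absolute directory to its module path. A rough standalone equivalent; run it from inside a module, and note that error handling is reduced to panics for brevity.

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
	"path/filepath"
)

func main() {
	out, err := exec.Command("go", "list", "-m", "-json").Output()
	if err != nil {
		panic(err)
	}
	var mod struct{ Path, Dir string }
	if err := json.Unmarshal(out, &mod); err != nil {
		panic(err)
	}
	roots := map[string]string{}
	if mod.Dir != "" && mod.Path != "" {
		absDir, err := filepath.Abs(mod.Dir)
		if err != nil {
			panic(err)
		}
		roots[absDir] = mod.Path
	}
	fmt.Println(roots) // e.g. map[/home/me/proj:example.com/proj]
}
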
@@ -291,3 +393,46 @@ func extractPackageName(filename string, contents []byte) (string, bool) {
}
return f.Name.Name, true
}
+
+func commonDir(a []string) string {
+ seen := make(map[string]bool)
+ x := append([]string{}, a...)
+ for _, f := range x {
+ seen[filepath.Dir(f)] = true
+ }
+ if len(seen) > 1 {
+ log.Fatalf("commonDir saw %v for %v", seen, x)
+ }
+ for k := range seen {
+ // len(seen) == 1
+ return k
+ }
+ return "" // no files
+}
+
+// It is possible that the files in the disk directory dir have a different package
+// name from newName, which is deduced from the overlays. If the disk files all
+// share a single package name and that name differs from newName, their package
+// name is changed to newName.
+// It returns true if it changes the package name, false otherwise.
+func maybeFixPackageName(newName string, pkgOfDir map[string][]*Package, dir string) bool {
+ names := make(map[string]int)
+ for _, p := range pkgOfDir[dir] {
+ names[p.Name]++
+ }
+ if len(names) != 1 {
+ // some files are in different packages
+ return false
+ }
+ oldName := ""
+ for k := range names {
+ oldName = k
+ }
+ if newName == oldName {
+ return false
+ }
+ for _, p := range pkgOfDir[dir] {
+ p.Name = newName
+ }
+ return true
+}
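
The determineRootDirs rewrite above memoizes both the computed map and any error behind rootsOnce, so every caller sees the same result and the underlying go invocation runs at most once. Below is a minimal standalone sketch of that sync.Once caching pattern; the rootState type and the load callback are invented for illustration and are not the vendored code.

package main

import (
	"fmt"
	"sync"
)

// rootState caches a computed map and its error so repeated callers pay for
// the work (and observe the same failure) exactly once.
type rootState struct {
	once     sync.Once
	rootDirs map[string]string
	rootErr  error
}

func (s *rootState) determineRootDirs(load func() (map[string]string, error)) (map[string]string, error) {
	s.once.Do(func() {
		s.rootDirs, s.rootErr = load()
	})
	return s.rootDirs, s.rootErr
}

func main() {
	calls := 0
	s := &rootState{}
	load := func() (map[string]string, error) {
		calls++ // runs once no matter how many callers arrive
		return map[string]string{"/home/user/mod": "example.com/mod"}, nil
	}
	for i := 0; i < 3; i++ {
		dirs, err := s.determineRootDirs(load)
		fmt.Println(dirs, err)
	}
	fmt.Println("load ran", calls, "time(s)")
}
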
diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
index aff94a3fe..7ea37e7ee 100644
--- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go
+++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
@@ -38,7 +38,7 @@ var modeStrings = []string{
func (mod LoadMode) String() string {
m := mod
if m == 0 {
- return fmt.Sprintf("LoadMode(0)")
+ return "LoadMode(0)"
}
var out []string
for i, x := range allModes {
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
index f98a0bdca..03fd999c0 100644
--- a/vendor/golang.org/x/tools/go/packages/packages.go
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -23,6 +23,8 @@ import (
"sync"
"golang.org/x/tools/go/gcexportdata"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/packagesinternal"
)
// A LoadMode controls the amount of detail to return when loading.
@@ -34,6 +36,9 @@ import (
// Load may return more information than requested.
type LoadMode int
+// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to
+// NeedExportFile to make it consistent with the Package field it's adding.
+
const (
// NeedName adds Name and PkgPath.
NeedName LoadMode = 1 << iota
@@ -51,7 +56,7 @@ const (
// NeedDeps adds the fields requested by the LoadMode in the packages in Imports.
NeedDeps
- // NeedExportsFile adds ExportsFile.
+ // NeedExportsFile adds ExportFile.
NeedExportsFile
// NeedTypes adds Types, Fset, and IllTyped.
@@ -123,6 +128,9 @@ type Config struct {
//
Env []string
+ // gocmdRunner guards go command calls from concurrency errors.
+ gocmdRunner *gocommand.Runner
+
// BuildFlags is a list of command-line flags to be passed through to
// the build system's query tool.
BuildFlags []string
@@ -292,6 +300,27 @@ type Package struct {
// TypesSizes provides the effective size function for types in TypesInfo.
TypesSizes types.Sizes
+
+ // forTest is the package under test, if any.
+ forTest string
+
+ // module is the module information for the package if it exists.
+ module *packagesinternal.Module
+}
+
+func init() {
+ packagesinternal.GetForTest = func(p interface{}) string {
+ return p.(*Package).forTest
+ }
+ packagesinternal.GetModule = func(p interface{}) *packagesinternal.Module {
+ return p.(*Package).module
+ }
+ packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner {
+ return config.(*Config).gocmdRunner
+ }
+ packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {
+ config.(*Config).gocmdRunner = runner
+ }
}
// An Error describes a problem with a package's metadata, syntax, or types.
@@ -454,6 +483,9 @@ func newLoader(cfg *Config) *loader {
if ld.Config.Env == nil {
ld.Config.Env = os.Environ()
}
+ if ld.Config.gocmdRunner == nil {
+ ld.Config.gocmdRunner = &gocommand.Runner{}
+ }
if ld.Context == nil {
ld.Context = context.Background()
}
@@ -500,12 +532,23 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
if i, found := rootMap[pkg.ID]; found {
rootIndex = i
}
+
+ // Overlays can invalidate export data.
+ // TODO(matloob): make this check fine-grained based on dependencies on overlaid files
+ exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
+ // This package needs type information if the caller requested types and the package is
+ // either a root, or it's a non-root and the user requested dependencies ...
+ needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
+ // This package needs source if the caller requested source (or types info, which implies source)
+ // and the package is either a root, or it's a non-root and the user requested dependencies...
+ needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
+ // ... or if we need types and the exportData is invalid. We fall back to (incompletely)
+ // typechecking packages from source if they fail to compile.
+ (ld.Mode&NeedTypes|NeedTypesInfo != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
lpkg := &loaderPackage{
Package: pkg,
- needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0,
- needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0 ||
- len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files
- pkg.ExportFile == "" && pkg.PkgPath != "unsafe",
+ needtypes: needtypes,
+ needsrc: needsrc,
}
ld.pkgs[lpkg.ID] = lpkg
if rootIndex >= 0 {
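
The refine change above replaces two dense boolean expressions with named needtypes/needsrc values. The sketch below is a rough restatement of that decision with the LoadMode bits collapsed into plain booleans; the function and parameter names are invented for illustration, not the vendored API.

package main

import "fmt"

// need reports, for one package, whether the loader should type-check it and
// whether it must be (re)built from source. It mirrors the shape of the
// refactored needtypes/needsrc logic above.
func need(isRoot, wantTypes, wantSyntax, wantDeps, exportDataInvalid, isUnsafe bool) (needTypes, needSrc bool) {
	relevant := isRoot || wantDeps // roots always matter; non-roots only if deps were requested
	needTypes = wantTypes && relevant
	needSrc = ((wantSyntax && relevant) || (wantTypes && exportDataInvalid)) && !isUnsafe
	return
}

func main() {
	// A root package with stale export data must be type-checked from source.
	t, s := need(true, true, false, false, true, false)
	fmt.Println(t, s) // true true

	// A non-root dependency is skipped entirely when deps weren't requested.
	t, s = need(false, true, true, false, false, false)
	fmt.Println(t, s) // false false
}
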
diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go
index b4f428767..dbe5b49a9 100644
--- a/vendor/golang.org/x/tools/imports/forward.go
+++ b/vendor/golang.org/x/tools/imports/forward.go
@@ -4,6 +4,7 @@ package imports // import "golang.org/x/tools/imports"
import (
"go/build"
+ "log"
"os"
intimp "golang.org/x/tools/internal/imports"
@@ -47,7 +48,6 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) {
GO111MODULE: os.Getenv("GO111MODULE"),
GOPROXY: os.Getenv("GOPROXY"),
GOSUMDB: os.Getenv("GOSUMDB"),
- Debug: Debug,
LocalPrefix: LocalPrefix,
},
AllErrors: opt.AllErrors,
@@ -57,6 +57,9 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) {
TabIndent: opt.TabIndent,
TabWidth: opt.TabWidth,
}
+ if Debug {
+ intopt.Env.Logf = log.Printf
+ }
return intimp.Process(filename, src, intopt)
}
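
forward.go now bridges the old package-level Debug flag to the new Logf hook: a nil Logf means debug logging is off. A small self-contained sketch of that optional-logger convention follows; the options type and scan function are stand-ins, not the internal API.

package main

import "log"

// options mirrors the shape of the change above: instead of a Debug flag plus
// a fixed logger, an optional Logf hook both enables and routes debug output.
type options struct {
	Logf func(format string, args ...interface{})
}

func scan(opts options, dir string) {
	if opts.Logf != nil { // nil means "debug logging disabled"
		opts.Logf("scanning %s", dir)
	}
	// ... real work would go here ...
}

func main() {
	scan(options{}, "/tmp/quiet")                // no output
	scan(options{Logf: log.Printf}, "/tmp/loud") // routed to the standard logger
}
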
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
index 7219c8e9f..9887f7e7a 100644
--- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
@@ -14,14 +14,14 @@ import (
"sync"
)
-// TraverseLink is used as a return value from WalkFuncs to indicate that the
+// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the
// symlink named in the call may be traversed.
-var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
+var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
-// SkipFiles is a used as a return value from WalkFuncs to indicate that the
+// ErrSkipFiles is used as a return value from WalkFuncs to indicate that the
// callback should not be called for any other files in the current directory.
// Child directories will still be traversed.
-var SkipFiles = errors.New("fastwalk: skip remaining files in directory")
+var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory")
// Walk is a faster implementation of filepath.Walk.
//
@@ -167,7 +167,7 @@ func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error {
err := w.fn(joined, typ)
if typ == os.ModeSymlink {
- if err == TraverseLink {
+ if err == ErrTraverseLink {
// Set callbackDone so we don't call it twice for both the
// symlink-as-symlink and the symlink-as-directory later:
w.enqueue(walkItem{dir: joined, callbackDone: true})
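
The rename above gives fastwalk's sentinel errors the conventional Err prefix. Because fastwalk is internal to x/tools and cannot be imported from outside the module, here is a standalone sketch of how such sentinels drive traversal decisions; the walker, sentinels, and paths are all hypothetical.

package main

import (
	"errors"
	"fmt"
)

// Sentinel errors used as control-flow signals, named with the Err prefix in
// the same style as the rename above.
var (
	ErrSkipFiles    = errors.New("walk: skip remaining files in directory")
	ErrTraverseLink = errors.New("walk: traverse symlink")
)

// walkFn is what a fastwalk-style callback looks like: returning a sentinel
// changes traversal behavior instead of reporting a failure.
func walkFn(path string, isSymlink bool) error {
	if isSymlink {
		return ErrTraverseLink // follow the link as if it were a directory
	}
	if path == "vendor/big.go" {
		return ErrSkipFiles // found what we need; skip the rest of this directory
	}
	return nil
}

func main() {
	for _, p := range []string{"vendor/big.go", "vendor/other.go", "link"} {
		err := walkFn(p, p == "link")
		switch {
		case errors.Is(err, ErrSkipFiles):
			fmt.Println(p, "-> skip remaining files")
		case errors.Is(err, ErrTraverseLink):
			fmt.Println(p, "-> traverse link")
		default:
			fmt.Println(p, "-> continue")
		}
	}
}
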
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
index a906b8759..b0d6327a9 100644
--- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
@@ -26,7 +26,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
continue
}
if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil {
- if err == SkipFiles {
+ if err == ErrSkipFiles {
skipFiles = true
continue
}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
index 3369b1a0b..5901a8f61 100644
--- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
@@ -66,7 +66,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
continue
}
if err := fn(dirName, name, typ); err != nil {
- if err == SkipFiles {
+ if err == ErrSkipFiles {
skipFiles = true
continue
}
@@ -76,8 +76,9 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
}
func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) {
- // golang.org/issue/15653
- dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
+ // golang.org/issue/37269
+ dirent := &syscall.Dirent{}
+ copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf)
if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v {
panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v))
}
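
parseDirEnt now copies the raw bytes into a freshly allocated Dirent instead of casting &buf[0], sidestepping the alignment issue tracked in golang.org/issue/37269. The sketch below demonstrates the same copy-into-struct technique with a made-up header type; the printed values assume a little-endian host, and nothing here is the vendored code.

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// header is a stand-in for syscall.Dirent: a fixed-layout record parsed out of
// a raw byte buffer.
type header struct {
	Ino    uint64
	Reclen uint16
}

func main() {
	// Build a raw buffer the way a kernel-filled dirent buffer would look.
	buf := make([]byte, 32)
	binary.LittleEndian.PutUint64(buf[0:8], 42)
	binary.LittleEndian.PutUint16(buf[8:10], 24)

	// Old approach: casting &buf[0] to *header assumes the buffer happens to be
	// aligned for header, which is not guaranteed.
	// New approach: allocate a properly aligned header and copy the bytes in.
	h := &header{}
	copy((*[unsafe.Sizeof(header{})]byte)(unsafe.Pointer(h))[:], buf)

	fmt.Println(h.Ino, h.Reclen) // 42 24 on a little-endian machine
}
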
diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
index d06756228..390cb9db7 100644
--- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
+++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go
@@ -23,8 +23,10 @@ import (
// Options controls the behavior of a Walk call.
type Options struct {
- Debug bool // Enable debug logging
- ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules.
+ // If Logf is non-nil, debug logging is enabled through this function.
+ Logf func(format string, args ...interface{})
+ // Search module caches. Also disables legacy goimports ignore rules.
+ ModulesEnabled bool
}
// RootType indicates the type of a Root.
@@ -80,14 +82,14 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root
// walkDir creates a walker and starts fastwalk with this walker.
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
- if opts.Debug {
- log.Printf("skipping nonexistent directory: %v", root.Path)
+ if opts.Logf != nil {
+ opts.Logf("skipping nonexistent directory: %v", root.Path)
}
return
}
start := time.Now()
- if opts.Debug {
- log.Printf("gopathwalk: scanning %s", root.Path)
+ if opts.Logf != nil {
+ opts.Logf("gopathwalk: scanning %s", root.Path)
}
w := &walker{
root: root,
@@ -100,8 +102,8 @@ func walkDir(root Root, add func(Root, string), skip func(root Root, dir string)
log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err)
}
- if opts.Debug {
- log.Printf("gopathwalk: scanned %s in %v", root.Path, time.Since(start))
+ if opts.Logf != nil {
+ opts.Logf("gopathwalk: scanned %s in %v", root.Path, time.Since(start))
}
}
@@ -130,11 +132,11 @@ func (w *walker) init() {
full := filepath.Join(w.root.Path, p)
if fi, err := os.Stat(full); err == nil {
w.ignoredDirs = append(w.ignoredDirs, fi)
- if w.opts.Debug {
- log.Printf("Directory added to ignore list: %s", full)
+ if w.opts.Logf != nil {
+ w.opts.Logf("Directory added to ignore list: %s", full)
}
- } else if w.opts.Debug {
- log.Printf("Error statting ignored directory: %v", err)
+ } else if w.opts.Logf != nil {
+ w.opts.Logf("Error statting ignored directory: %v", err)
}
}
}
@@ -145,11 +147,11 @@ func (w *walker) init() {
func (w *walker) getIgnoredDirs(path string) []string {
file := filepath.Join(path, ".goimportsignore")
slurp, err := ioutil.ReadFile(file)
- if w.opts.Debug {
+ if w.opts.Logf != nil {
if err != nil {
- log.Print(err)
+ w.opts.Logf("%v", err)
} else {
- log.Printf("Read %s", file)
+ w.opts.Logf("Read %s", file)
}
}
if err != nil {
@@ -189,14 +191,14 @@ func (w *walker) walk(path string, typ os.FileMode) error {
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
// Doesn't make sense to have regular files
// directly in your $GOPATH/src or $GOROOT/src.
- return fastwalk.SkipFiles
+ return fastwalk.ErrSkipFiles
}
if !strings.HasSuffix(path, ".go") {
return nil
}
w.add(w.root, dir)
- return fastwalk.SkipFiles
+ return fastwalk.ErrSkipFiles
}
if typ == os.ModeDir {
base := filepath.Base(path)
@@ -224,7 +226,7 @@ func (w *walker) walk(path string, typ os.FileMode) error {
return nil
}
if w.shouldTraverse(dir, fi) {
- return fastwalk.TraverseLink
+ return fastwalk.ErrTraverseLink
}
}
return nil
diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go
index dffee291b..264d001ed 100644
--- a/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -14,7 +14,6 @@ import (
"go/token"
"io/ioutil"
"os"
- "os/exec"
"path"
"path/filepath"
"reflect"
@@ -22,11 +21,11 @@ import (
"strconv"
"strings"
"sync"
- "time"
"unicode"
"unicode/utf8"
"golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/gopathwalk"
)
@@ -263,7 +262,7 @@ type pass struct {
// loadPackageNames saves the package names for everything referenced by imports.
func (p *pass) loadPackageNames(imports []*ImportInfo) error {
- if p.env.Debug {
+ if p.env.Logf != nil {
p.env.Logf("loading package names for %v packages", len(imports))
defer func() {
p.env.Logf("done loading package names for %v packages", len(imports))
@@ -335,7 +334,7 @@ func (p *pass) load() ([]*ImportFix, bool) {
if p.loadRealPackageNames {
err := p.loadPackageNames(append(imports, p.candidates...))
if err != nil {
- if p.env.Debug {
+ if p.env.Logf != nil {
p.env.Logf("loading package names: %v", err)
}
return nil, false
@@ -529,7 +528,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
return nil, err
}
srcDir := filepath.Dir(abs)
- if env.Debug {
+ if env.Logf != nil {
env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
}
@@ -537,7 +536,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
// derive package names from import paths, see if the file is already
// complete. We can't add any imports yet, because we don't know
// if missing references are actually package vars.
- p := &pass{fset: fset, f: f, srcDir: srcDir}
+ p := &pass{fset: fset, f: f, srcDir: srcDir, env: env}
if fixes, done := p.load(); done {
return fixes, nil
}
@@ -559,8 +558,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
}
// Third pass: get real package names where we had previously used
- // the naive algorithm. This is the first step that will use the
- // environment, so we provide it here for the first time.
+ // the naive algorithm.
p = &pass{fset: fset, f: f, srcDir: srcDir, env: env}
p.loadRealPackageNames = true
p.otherFiles = otherFiles
@@ -629,12 +627,44 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena
return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg)
},
exportsLoaded: func(pkg *pkg, exports []string) {
+ // If we're an x_test, load the package under test's test variant.
+ if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) {
+ var err error
+ _, exports, err = loadExportsFromFiles(ctx, env, pkg.dir, true)
+ if err != nil {
+ return
+ }
+ }
wrappedCallback.exportsLoaded(pkg, exports)
},
}
return env.GetResolver().scan(ctx, scanFilter)
}
+func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) map[string]int {
+ result := make(map[string]int)
+ for _, path := range paths {
+ result[path] = env.GetResolver().scoreImportPath(ctx, path)
+ }
+ return result
+}
+
+func PrimeCache(ctx context.Context, env *ProcessEnv) error {
+ // Fully scan the disk for directories, but don't actually read any Go files.
+ callback := &scanCallback{
+ rootFound: func(gopathwalk.Root) bool {
+ return true
+ },
+ dirFound: func(pkg *pkg) bool {
+ return false
+ },
+ packageNameLoaded: func(pkg *pkg) bool {
+ return false
+ },
+ }
+ return getCandidatePkgs(ctx, callback, "", "", env)
+}
+
func candidateImportName(pkg *pkg) string {
if ImportPathToAssumedName(pkg.importPathShort) != pkg.packageName {
return pkg.packageName
@@ -716,19 +746,29 @@ func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
// the go command, the go/build package, etc.
type ProcessEnv struct {
LocalPrefix string
- Debug bool
+
+ GocmdRunner *gocommand.Runner
+
+ BuildFlags []string
// If non-empty, these will be used instead of the
// process-wide values.
GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string
WorkingDir string
- // Logf is the default logger for the ProcessEnv.
+ // If Logf is non-nil, debug logging is enabled through this function.
Logf func(format string, args ...interface{})
resolver Resolver
}
+// CopyConfig copies the env's configuration into a new env.
+func (e *ProcessEnv) CopyConfig() *ProcessEnv {
+ copy := *e
+ copy.resolver = nil
+ return &copy
+}
+
func (e *ProcessEnv) env() []string {
env := os.Environ()
add := func(k, v string) {
@@ -752,12 +792,12 @@ func (e *ProcessEnv) GetResolver() Resolver {
if e.resolver != nil {
return e.resolver
}
- out, err := e.invokeGo("env", "GOMOD")
+ out, err := e.invokeGo(context.TODO(), "env", "GOMOD")
if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 {
- e.resolver = &gopathResolver{env: e}
+ e.resolver = newGopathResolver(e)
return e.resolver
}
- e.resolver = &ModuleResolver{env: e}
+ e.resolver = newModuleResolver(e)
return e.resolver
}
@@ -783,37 +823,24 @@ func (e *ProcessEnv) buildContext() *build.Context {
return &ctx
}
-func (e *ProcessEnv) invokeGo(args ...string) (*bytes.Buffer, error) {
- cmd := exec.Command("go", args...)
- stdout := &bytes.Buffer{}
- stderr := &bytes.Buffer{}
- cmd.Stdout = stdout
- cmd.Stderr = stderr
- cmd.Env = e.env()
- cmd.Dir = e.WorkingDir
-
- if e.Debug {
- defer func(start time.Time) { e.Logf("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
+func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) (*bytes.Buffer, error) {
+ inv := gocommand.Invocation{
+ Verb: verb,
+ Args: args,
+ BuildFlags: e.BuildFlags,
+ Env: e.env(),
+ Logf: e.Logf,
+ WorkingDir: e.WorkingDir,
}
- if err := cmd.Run(); err != nil {
- return nil, fmt.Errorf("running go: %v (stderr:\n%s)", err, stderr)
- }
- return stdout, nil
-}
-
-func cmdDebugStr(cmd *exec.Cmd) string {
- env := make(map[string]string)
- for _, kv := range cmd.Env {
- split := strings.Split(kv, "=")
- k, v := split[0], split[1]
- env[k] = v
- }
-
- return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args)
+ return e.GocmdRunner.Run(ctx, inv)
}
func addStdlibCandidates(pass *pass, refs references) {
add := func(pkg string) {
+ // Prevent self-imports.
+ if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.GOROOT, "src", pkg) == pass.srcDir {
+ return
+ }
exports := copyExports(stdlib[pkg])
pass.addCandidate(
&ImportInfo{ImportPath: pkg},
@@ -842,7 +869,9 @@ type Resolver interface {
scan(ctx context.Context, callback *scanCallback) error
// loadExports returns the set of exported symbols in the package at dir.
// loadExports may be called concurrently.
- loadExports(ctx context.Context, pkg *pkg) (string, []string, error)
+ loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error)
+ // scoreImportPath returns the relevance for an import path.
+ scoreImportPath(ctx context.Context, path string) int
ClearForNewScan()
}
@@ -992,24 +1021,36 @@ func ImportPathToAssumedName(importPath string) string {
// gopathResolver implements resolver for GOPATH workspaces.
type gopathResolver struct {
- env *ProcessEnv
- cache *dirInfoCache
+ env *ProcessEnv
+ walked bool
+ cache *dirInfoCache
+ scanSema chan struct{} // scanSema prevents concurrent scans.
}
-func (r *gopathResolver) init() {
- if r.cache == nil {
- r.cache = &dirInfoCache{
- dirs: map[string]*directoryPackageInfo{},
- }
+func newGopathResolver(env *ProcessEnv) *gopathResolver {
+ r := &gopathResolver{
+ env: env,
+ cache: &dirInfoCache{
+ dirs: map[string]*directoryPackageInfo{},
+ listeners: map[*int]cacheListener{},
+ },
+ scanSema: make(chan struct{}, 1),
}
+ r.scanSema <- struct{}{}
+ return r
}
func (r *gopathResolver) ClearForNewScan() {
- r.cache = nil
+ <-r.scanSema
+ r.cache = &dirInfoCache{
+ dirs: map[string]*directoryPackageInfo{},
+ listeners: map[*int]cacheListener{},
+ }
+ r.walked = false
+ r.scanSema <- struct{}{}
}
func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
- r.init()
names := map[string]string{}
for _, path := range importPaths {
names[path] = importPathToName(r.env, path, srcDir)
@@ -1137,7 +1178,6 @@ func distance(basepath, targetpath string) int {
}
func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error {
- r.init()
add := func(root gopathwalk.Root, dir string) {
// We assume cached directories have not changed. We can skip them and their
// children.
@@ -1154,21 +1194,15 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error
}
r.cache.Store(dir, info)
}
- roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), callback.rootFound)
- gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false})
- for _, dir := range r.cache.Keys() {
- if ctx.Err() != nil {
- return nil
- }
-
- info, ok := r.cache.Load(dir)
- if !ok {
- continue
+ processDir := func(info directoryPackageInfo) {
+ // Skip this directory if we were not able to get the package information successfully.
+ if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
+ return
}
p := &pkg{
importPathShort: info.nonCanonicalImportPath,
- dir: dir,
+ dir: info.dir,
relevance: MaxRelevance - 1,
}
if info.rootType == gopathwalk.RootGOROOT {
@@ -1176,24 +1210,52 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error
}
if !callback.dirFound(p) {
- continue
+ return
}
var err error
p.packageName, err = r.cache.CachePackageName(info)
if err != nil {
- continue
+ return
}
if !callback.packageNameLoaded(p) {
- continue
+ return
}
- if _, exports, err := r.loadExports(ctx, p); err == nil {
+ if _, exports, err := r.loadExports(ctx, p, false); err == nil {
callback.exportsLoaded(p, exports)
}
}
+ stop := r.cache.ScanAndListen(ctx, processDir)
+ defer stop()
+ // The callback is not necessarily safe to use in the goroutine below. Process roots eagerly.
+ roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), callback.rootFound)
+ // We can't cancel walks, because we need them to finish to have a usable
+ // cache. Instead, run them in a separate goroutine and detach.
+ scanDone := make(chan struct{})
+ go func() {
+ select {
+ case <-ctx.Done():
+ return
+ case <-r.scanSema:
+ }
+ defer func() { r.scanSema <- struct{}{} }()
+ gopathwalk.Walk(roots, add, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: false})
+ close(scanDone)
+ }()
+ select {
+ case <-ctx.Done():
+ case <-scanDone:
+ }
return nil
}
+func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) int {
+ if _, ok := stdlib[path]; ok {
+ return MaxRelevance
+ }
+ return MaxRelevance - 1
+}
+
func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []gopathwalk.Root {
var result []gopathwalk.Root
for _, root := range roots {
@@ -1205,12 +1267,11 @@ func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []
return result
}
-func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) {
- r.init()
- if info, ok := r.cache.Load(pkg.dir); ok {
+func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
+ if info, ok := r.cache.Load(pkg.dir); ok && !includeTest {
return r.cache.CacheExports(ctx, r.env, info)
}
- return loadExportsFromFiles(ctx, r.env, pkg.dir)
+ return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest)
}
// VendorlessPath returns the devendorized version of the import path ipath.
@@ -1226,7 +1287,7 @@ func VendorlessPath(ipath string) string {
return ipath
}
-func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (string, []string, error) {
+func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) {
var exports []string
// Look for non-test, buildable .go files which could provide exports.
@@ -1237,7 +1298,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str
var files []os.FileInfo
for _, fi := range all {
name := fi.Name()
- if !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") {
+ if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) {
continue
}
match, err := env.buildContext().MatchFile(dir, fi.Name())
@@ -1270,6 +1331,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str
// handled by MatchFile above.
continue
}
+ if includeTest && strings.HasSuffix(f.Name.Name, "_test") {
+ // x_test package. We want internal test files only.
+ continue
+ }
pkgName = f.Name.Name
for name := range f.Scope.Objects {
if ast.IsExported(name) {
@@ -1278,7 +1343,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str
}
}
- if env.Debug {
+ if env.Logf != nil {
sortedExports := append([]string(nil), exports...)
sort.Strings(sortedExports)
env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, strings.Join(sortedExports, ", "))
@@ -1294,7 +1359,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
// ones. Note that this sorts by the de-vendored name, so
// there's no "penalty" for vendoring.
sort.Sort(byDistanceOrImportPathShortLength(candidates))
- if pass.env.Debug {
+ if pass.env.Logf != nil {
for i, c := range candidates {
pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
}
@@ -1332,12 +1397,14 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
wg.Done()
}()
- if pass.env.Debug {
+ if pass.env.Logf != nil {
pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
}
- exports, err := loadExportsForPackage(ctx, pass.env, pkgName, c.pkg)
+ // If we're an x_test, load the package under test's test variant.
+ includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir
+ _, exports, err := pass.env.GetResolver().loadExports(ctx, c.pkg, includeTest)
if err != nil {
- if pass.env.Debug {
+ if pass.env.Logf != nil {
pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
}
resc <- nil
@@ -1372,17 +1439,6 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
return nil, nil
}
-func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg string, pkg *pkg) ([]string, error) {
- pkgName, exports, err := env.GetResolver().loadExports(ctx, pkg)
- if err != nil {
- return nil, err
- }
- if expectPkg != pkgName {
- return nil, fmt.Errorf("dir %v is package %v, wanted %v", pkg.dir, pkgName, expectPkg)
- }
- return exports, err
-}
-
// pkgIsCandidate reports whether pkg is a candidate for satisfying the
// finding which package pkgIdent in the file named by filename is trying
// to refer to.
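
The gopathResolver.scan change above runs its walk in a detached goroutine guarded by a one-slot scanSema channel (the ModuleResolver gets the same treatment further down), so cancelling the caller's context stops the wait without throwing away a half-built cache. A minimal sketch of that pattern, with invented names (scanner, walk) and none of the vendored types:

package main

import (
	"context"
	"fmt"
	"time"
)

// scanner owns a one-slot channel used as a semaphore, the same shape as the
// scanSema fields above: holding the token means "a scan is running".
type scanner struct {
	scanSema chan struct{}
}

func newScanner() *scanner {
	s := &scanner{scanSema: make(chan struct{}, 1)}
	s.scanSema <- struct{}{} // start with the token available
	return s
}

// scan kicks off a walk that must run to completion to keep the cache usable,
// so it is detached: the caller's context can stop us from waiting, but the
// walk itself keeps going in the background.
func (s *scanner) scan(ctx context.Context, walk func()) {
	scanDone := make(chan struct{})
	go func() {
		select {
		case <-ctx.Done():
			return // never acquired the token; give up quietly
		case <-s.scanSema:
		}
		defer func() { s.scanSema <- struct{}{} }()
		walk()
		close(scanDone)
	}()
	select {
	case <-ctx.Done(): // caller stopped waiting
	case <-scanDone: // walk finished while we were still interested
	}
}

func main() {
	s := newScanner()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	s.scan(ctx, func() {
		time.Sleep(50 * time.Millisecond)
		fmt.Println("walk finished (detached)")
	})
	fmt.Println("caller returned early")
	time.Sleep(100 * time.Millisecond) // let the detached walk print
}
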
diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go
index 7c08eed66..f43d6b22e 100644
--- a/vendor/golang.org/x/tools/internal/imports/imports.go
+++ b/vendor/golang.org/x/tools/internal/imports/imports.go
@@ -21,13 +21,13 @@ import (
"go/token"
"io"
"io/ioutil"
- "log"
"os"
"regexp"
"strconv"
"strings"
"golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/internal/gocommand"
)
// Options is golang.org/x/tools/imports.Options with extra internal-only options.
@@ -119,7 +119,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
// GetAllCandidates gets all of the packages starting with prefix that can be
// imported by filename, sorted by import path.
func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error {
- _, opt, err := initialize(filename, nil, opt)
+ _, opt, err := initialize(filename, []byte{}, opt)
if err != nil {
return err
}
@@ -128,7 +128,7 @@ func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefi
// GetPackageExports returns all known packages with name pkg and their exports.
func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error {
- _, opt, err := initialize(filename, nil, opt)
+ _, opt, err := initialize(filename, []byte{}, opt)
if err != nil {
return err
}
@@ -155,12 +155,10 @@ func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, er
GOSUMDB: os.Getenv("GOSUMDB"),
}
}
-
- // Set the logger if the user has not provided it.
- if opt.Env.Logf == nil {
- opt.Env.Logf = log.Printf
+ // Set the gocmdRunner if the user has not provided it.
+ if opt.Env.GocmdRunner == nil {
+ opt.Env.GocmdRunner = &gocommand.Runner{}
}
-
if src == nil {
b, err := ioutil.ReadFile(filename)
if err != nil {
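
GetAllCandidates and GetPackageExports now pass []byte{} instead of nil so that initialize does not try to read the named file from disk. A tiny sketch of the nil-versus-empty convention being relied on; loadSrc is a hypothetical helper, not the internal function.

package main

import (
	"fmt"
	"io/ioutil"
)

// loadSrc mirrors the distinction used above: a nil src means "read the file
// from disk", while a non-nil (even empty) src is used as-is.
func loadSrc(filename string, src []byte) ([]byte, error) {
	if src == nil {
		return ioutil.ReadFile(filename)
	}
	return src, nil
}

func main() {
	b, err := loadSrc("does-not-exist.go", []byte{})
	fmt.Printf("explicit empty src: %q, err=%v\n", b, err) // no disk access, no error

	_, err = loadSrc("does-not-exist.go", nil)
	fmt.Println("nil src reads the file:", err != nil) // true: the read fails
}
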
diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go
index 2ef55b931..69e3eecc4 100644
--- a/vendor/golang.org/x/tools/internal/imports/mod.go
+++ b/vendor/golang.org/x/tools/internal/imports/mod.go
@@ -14,9 +14,9 @@ import (
"strconv"
"strings"
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/semver"
"golang.org/x/tools/internal/gopathwalk"
- "golang.org/x/tools/internal/module"
- "golang.org/x/tools/internal/semver"
)
// ModuleResolver implements resolver for modules using the go command as little
@@ -26,12 +26,13 @@ type ModuleResolver struct {
moduleCacheDir string
dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory.
roots []gopathwalk.Root
+ scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots.
scannedRoots map[gopathwalk.Root]bool
- Initialized bool
- Main *ModuleJSON
- ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path...
- ModsByDir []*ModuleJSON // ...or Dir.
+ initialized bool
+ main *ModuleJSON
+ modsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path...
+ modsByDir []*ModuleJSON // ...or Dir.
// moduleCacheCache stores information about the module cache.
moduleCacheCache *dirInfoCache
@@ -48,8 +49,17 @@ type ModuleJSON struct {
GoVersion string // go version used in module
}
+func newModuleResolver(e *ProcessEnv) *ModuleResolver {
+ r := &ModuleResolver{
+ env: e,
+ scanSema: make(chan struct{}, 1),
+ }
+ r.scanSema <- struct{}{}
+ return r
+}
+
func (r *ModuleResolver) init() error {
- if r.Initialized {
+ if r.initialized {
return nil
}
mainMod, vendorEnabled, err := vendorEnabled(r.env)
@@ -60,13 +70,13 @@ func (r *ModuleResolver) init() error {
if mainMod != nil && vendorEnabled {
// Vendor mode is on, so all the non-Main modules are irrelevant,
// and we need to search /vendor for everything.
- r.Main = mainMod
+ r.main = mainMod
r.dummyVendorMod = &ModuleJSON{
Path: "",
Dir: filepath.Join(mainMod.Dir, "vendor"),
}
- r.ModsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod}
- r.ModsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod}
+ r.modsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod}
+ r.modsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod}
} else {
// Vendor mode is off, so run go list -m ... to find everything.
r.initAllMods()
@@ -74,15 +84,15 @@ func (r *ModuleResolver) init() error {
r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod")
- sort.Slice(r.ModsByModPath, func(i, j int) bool {
+ sort.Slice(r.modsByModPath, func(i, j int) bool {
count := func(x int) int {
- return strings.Count(r.ModsByModPath[x].Path, "/")
+ return strings.Count(r.modsByModPath[x].Path, "/")
}
return count(j) < count(i) // descending order
})
- sort.Slice(r.ModsByDir, func(i, j int) bool {
+ sort.Slice(r.modsByDir, func(i, j int) bool {
count := func(x int) int {
- return strings.Count(r.ModsByDir[x].Dir, "/")
+ return strings.Count(r.modsByDir[x].Dir, "/")
}
return count(j) < count(i) // descending order
})
@@ -90,8 +100,8 @@ func (r *ModuleResolver) init() error {
r.roots = []gopathwalk.Root{
{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
}
- if r.Main != nil {
- r.roots = append(r.roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule})
+ if r.main != nil {
+ r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
}
if vendorEnabled {
r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther})
@@ -105,13 +115,13 @@ func (r *ModuleResolver) init() error {
}
}
// Walk dependent modules before scanning the full mod cache, direct deps first.
- for _, mod := range r.ModsByModPath {
- if !mod.Indirect {
+ for _, mod := range r.modsByModPath {
+ if !mod.Indirect && !mod.Main {
addDep(mod)
}
}
- for _, mod := range r.ModsByModPath {
- if mod.Indirect {
+ for _, mod := range r.modsByModPath {
+ if mod.Indirect && !mod.Main {
addDep(mod)
}
}
@@ -121,20 +131,22 @@ func (r *ModuleResolver) init() error {
r.scannedRoots = map[gopathwalk.Root]bool{}
if r.moduleCacheCache == nil {
r.moduleCacheCache = &dirInfoCache{
- dirs: map[string]*directoryPackageInfo{},
+ dirs: map[string]*directoryPackageInfo{},
+ listeners: map[*int]cacheListener{},
}
}
if r.otherCache == nil {
r.otherCache = &dirInfoCache{
- dirs: map[string]*directoryPackageInfo{},
+ dirs: map[string]*directoryPackageInfo{},
+ listeners: map[*int]cacheListener{},
}
}
- r.Initialized = true
+ r.initialized = true
return nil
}
func (r *ModuleResolver) initAllMods() error {
- stdout, err := r.env.invokeGo("list", "-m", "-json", "...")
+ stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-json", "...")
if err != nil {
return err
}
@@ -144,34 +156,43 @@ func (r *ModuleResolver) initAllMods() error {
return err
}
if mod.Dir == "" {
- if r.env.Debug {
+ if r.env.Logf != nil {
r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path)
}
// Can't do anything with a module that's not downloaded.
continue
}
- r.ModsByModPath = append(r.ModsByModPath, mod)
- r.ModsByDir = append(r.ModsByDir, mod)
+ // golang/go#36193: the go command doesn't always clean paths.
+ mod.Dir = filepath.Clean(mod.Dir)
+ r.modsByModPath = append(r.modsByModPath, mod)
+ r.modsByDir = append(r.modsByDir, mod)
if mod.Main {
- r.Main = mod
+ r.main = mod
}
}
return nil
}
func (r *ModuleResolver) ClearForNewScan() {
+ <-r.scanSema
r.scannedRoots = map[gopathwalk.Root]bool{}
r.otherCache = &dirInfoCache{
- dirs: map[string]*directoryPackageInfo{},
+ dirs: map[string]*directoryPackageInfo{},
+ listeners: map[*int]cacheListener{},
}
+ r.scanSema <- struct{}{}
}
func (r *ModuleResolver) ClearForNewMod() {
- env := r.env
+ <-r.scanSema
*r = ModuleResolver{
- env: env,
+ env: r.env,
+ moduleCacheCache: r.moduleCacheCache,
+ otherCache: r.otherCache,
+ scanSema: r.scanSema,
}
r.init()
+ r.scanSema <- struct{}{}
}
// findPackage returns the module and directory that contains the package at
@@ -179,7 +200,7 @@ func (r *ModuleResolver) ClearForNewMod() {
func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) {
// This can't find packages in the stdlib, but that's harmless for all
// the existing code paths.
- for _, m := range r.ModsByModPath {
+ for _, m := range r.modsByModPath {
if !strings.HasPrefix(importPath, m.Path) {
continue
}
@@ -273,7 +294,7 @@ func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON {
// - in /vendor/ in -mod=vendor mode.
// - nested module? Dunno.
// Rumor has it that replace targets cannot contain other replace targets.
- for _, m := range r.ModsByDir {
+ for _, m := range r.modsByDir {
if !strings.HasPrefix(dir, m.Dir) {
continue
}
@@ -394,25 +415,19 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
if !callback.packageNameLoaded(pkg) {
return
}
- _, exports, err := r.loadExports(ctx, pkg)
+ _, exports, err := r.loadExports(ctx, pkg, false)
if err != nil {
return
}
callback.exportsLoaded(pkg, exports)
}
- // Everything we already had is in the cache. Process it now, in hopes we
- // we don't need anything new.
- for _, dir := range r.cacheKeys() {
- if ctx.Err() != nil {
- return nil
- }
- info, ok := r.cacheLoad(dir)
- if !ok {
- continue
- }
- processDir(info)
- }
+ // Start processing everything in the cache, and listen for the new stuff
+ // we discover in the walk below.
+ stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir)
+ defer stop1()
+ stop2 := r.otherCache.ScanAndListen(ctx, processDir)
+ defer stop2()
// We assume cached directories are fully cached, including all their
// children, and have not changed. We can skip them.
@@ -428,36 +443,66 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
return packageScanned
}
- // Add anything new to the cache, and process it if we're still looking.
+ // Add anything new to the cache, and process it if we're still listening.
add := func(root gopathwalk.Root, dir string) {
- info := r.scanDirForPackage(root, dir)
- r.cacheStore(info)
- if ctx.Err() == nil {
- processDir(info)
- }
+ r.cacheStore(r.scanDirForPackage(root, dir))
}
+ // r.roots and the callback are not necessarily safe to use in the
+ // goroutine below. Process them eagerly.
+ roots := filterRoots(r.roots, callback.rootFound)
// We can't cancel walks, because we need them to finish to have a usable
- // cache. We can do them one by one and stop in between.
- // TODO(heschi): Run asynchronously and detach on cancellation? Would risk
- // racy callbacks.
- for _, root := range r.roots {
- if ctx.Err() != nil {
- return nil
+ // cache. Instead, run them in a separate goroutine and detach.
+ scanDone := make(chan struct{})
+ go func() {
+ select {
+ case <-ctx.Done():
+ return
+ case <-r.scanSema:
}
+ defer func() { r.scanSema <- struct{}{} }()
+ // We have the lock on r.scannedRoots, and no other scans can run.
+ for _, root := range roots {
+ if ctx.Err() != nil {
+ return
+ }
- if r.scannedRoots[root] {
- continue
- }
- if !callback.rootFound(root) {
- continue
+ if r.scannedRoots[root] {
+ continue
+ }
+ gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true})
+ r.scannedRoots[root] = true
}
- gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true})
- r.scannedRoots[root] = true
+ close(scanDone)
+ }()
+ select {
+ case <-ctx.Done():
+ case <-scanDone:
}
return nil
}
+func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int {
+ if _, ok := stdlib[path]; ok {
+ return MaxRelevance
+ }
+ mod, _ := r.findPackage(path)
+ return modRelevance(mod)
+}
+
+func modRelevance(mod *ModuleJSON) int {
+ switch {
+ case mod == nil: // out of scope
+ return MaxRelevance - 4
+ case mod.Indirect:
+ return MaxRelevance - 3
+ case !mod.Main:
+ return MaxRelevance - 2
+ default:
+ return MaxRelevance - 1 // main module ties with stdlib
+ }
+}
+
// canonicalize gets the result of canonicalizing the packages using the results
// of initializing the resolver from 'go list -m'.
func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
@@ -472,14 +517,9 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
}
importPath := info.nonCanonicalImportPath
- relevance := MaxRelevance - 3
+ mod := r.findModuleByDir(info.dir)
// Check if the directory is underneath a module that's in scope.
- if mod := r.findModuleByDir(info.dir); mod != nil {
- if mod.Indirect {
- relevance = MaxRelevance - 2
- } else {
- relevance = MaxRelevance - 1
- }
+ if mod != nil {
// It is. If dir is the target of a replace directive,
// our guessed import path is wrong. Use the real one.
if mod.Dir == info.dir {
@@ -497,7 +537,7 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
res := &pkg{
importPathShort: importPath,
dir: info.dir,
- relevance: relevance,
+ relevance: modRelevance(mod),
}
// We may have discovered a package that has a different version
// in scope already. Canonicalize to that one if possible.
@@ -507,14 +547,14 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
return res, nil
}
-func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) {
+func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
if err := r.init(); err != nil {
return "", nil, err
}
- if info, ok := r.cacheLoad(pkg.dir); ok {
+ if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest {
return r.cacheExports(ctx, r.env, info)
}
- return loadExportsFromFiles(ctx, r.env, pkg.dir)
+ return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest)
}
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
@@ -532,7 +572,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
}
switch root.Type {
case gopathwalk.RootCurrentModule:
- importPath = path.Join(r.Main.Path, filepath.ToSlash(subdir))
+ importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
case gopathwalk.RootModuleCache:
matches := modCacheRegexp.FindStringSubmatch(subdir)
if len(matches) == 0 {
@@ -541,9 +581,9 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
err: fmt.Errorf("invalid module cache path: %v", subdir),
}
}
- modPath, err := module.DecodePath(filepath.ToSlash(matches[1]))
+ modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
if err != nil {
- if r.env.Debug {
+ if r.env.Logf != nil {
r.env.Logf("decoding module cache path %q: %v", subdir, err)
}
return directoryPackageInfo{
@@ -659,7 +699,7 @@ func getMainModuleAnd114(env *ProcessEnv) (*ModuleJSON, bool, error) {
{{.GoVersion}}
{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}
`
- stdout, err := env.invokeGo("list", "-m", "-f", format)
+ stdout, err := env.invokeGo(context.TODO(), "list", "-m", "-f", format)
if err != nil {
return nil, false, nil
}
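
The relevance scoring above now funnels through modRelevance, which ranks stdlib, then the main module, then direct dependencies, indirect dependencies, and finally out-of-scope modules. The standalone version below only illustrates that ladder; maxRelevance and modInfo are stand-ins for the package's MaxRelevance and ModuleJSON, and the sorting is a hypothetical usage example.

package main

import (
	"fmt"
	"sort"
)

const maxRelevance = 7 // stand-in for the package's MaxRelevance constant

// modInfo is a pared-down stand-in for ModuleJSON with just the fields the
// relevance ladder cares about.
type modInfo struct {
	Main     bool
	Indirect bool
}

// relevance mirrors scoreImportPath/modRelevance: stdlib first, then the main
// module, direct deps, indirect deps, and finally modules not in scope.
func relevance(isStdlib bool, mod *modInfo) int {
	switch {
	case isStdlib:
		return maxRelevance
	case mod == nil: // out of scope
		return maxRelevance - 4
	case mod.Indirect:
		return maxRelevance - 3
	case !mod.Main:
		return maxRelevance - 2
	default:
		return maxRelevance - 1 // main module
	}
}

func main() {
	type candidate struct {
		path string
		rel  int
	}
	cands := []candidate{
		{"example.com/notinscope", relevance(false, nil)},
		{"example.com/indirect", relevance(false, &modInfo{Indirect: true})},
		{"example.com/direct", relevance(false, &modInfo{})},
		{"example.com/main/pkg", relevance(false, &modInfo{Main: true})},
		{"strings", relevance(true, nil)},
	}
	sort.Slice(cands, func(i, j int) bool { return cands[i].rel > cands[j].rel })
	for _, c := range cands {
		fmt.Println(c.rel, c.path)
	}
}
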
diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go
index e44926ee5..5b4f03acc 100644
--- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go
+++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go
@@ -93,15 +93,86 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
type dirInfoCache struct {
mu sync.Mutex
// dirs stores information about packages in directories, keyed by absolute path.
- dirs map[string]*directoryPackageInfo
+ dirs map[string]*directoryPackageInfo
+ listeners map[*int]cacheListener
+}
+
+type cacheListener func(directoryPackageInfo)
+
+// ScanAndListen calls listener on all the items in the cache, and on anything
+// newly added. The returned stop function waits for all in-flight callbacks to
+// finish and blocks new ones.
+func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
+ ctx, cancel := context.WithCancel(ctx)
+
+ // Flushing out all the callbacks is tricky without knowing how many there
+ // are going to be. Setting an arbitrary limit makes it much easier.
+ const maxInFlight = 10
+ sema := make(chan struct{}, maxInFlight)
+ for i := 0; i < maxInFlight; i++ {
+ sema <- struct{}{}
+ }
+
+ cookie := new(int) // A unique ID we can use for the listener.
+
+ // We can't hold mu while calling the listener.
+ d.mu.Lock()
+ var keys []string
+ for key := range d.dirs {
+ keys = append(keys, key)
+ }
+ d.listeners[cookie] = func(info directoryPackageInfo) {
+ select {
+ case <-ctx.Done():
+ return
+ case <-sema:
+ }
+ listener(info)
+ sema <- struct{}{}
+ }
+ d.mu.Unlock()
+
+ stop := func() {
+ cancel()
+ d.mu.Lock()
+ delete(d.listeners, cookie)
+ d.mu.Unlock()
+ for i := 0; i < maxInFlight; i++ {
+ <-sema
+ }
+ }
+
+ // Process the pre-existing keys.
+ for _, k := range keys {
+ select {
+ case <-ctx.Done():
+ return stop
+ default:
+ }
+ if v, ok := d.Load(k); ok {
+ listener(v)
+ }
+ }
+
+ return stop
}
// Store stores the package info for dir.
func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
d.mu.Lock()
- defer d.mu.Unlock()
- stored := info // defensive copy
- d.dirs[dir] = &stored
+ _, old := d.dirs[dir]
+ d.dirs[dir] = &info
+ var listeners []cacheListener
+ for _, l := range d.listeners {
+ listeners = append(listeners, l)
+ }
+ d.mu.Unlock()
+
+ if !old {
+ for _, l := range listeners {
+ l(info)
+ }
+ }
}
// Load returns a copy of the directoryPackageInfo for absolute directory dir.
@@ -145,7 +216,7 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d
if reached, err := info.reachedStatus(nameLoaded); reached && err != nil {
return "", nil, err
}
- info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir)
+ info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false)
if info.err == context.Canceled || info.err == context.DeadlineExceeded {
return info.packageName, info.exports, info.err
}
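
dirInfoCache.ScanAndListen above replays the cached entries to a listener and keeps streaming newly Stored entries until the returned stop function runs. The sketch below keeps only that register-and-replay shape and deliberately omits the in-flight semaphore the real code uses so stop can wait for outstanding callbacks; every name here is invented.

package main

import (
	"context"
	"fmt"
	"sync"
)

// cache is a pared-down version of the dirInfoCache pattern: Store announces
// brand-new keys to listeners, and ScanAndListen replays existing entries
// before streaming new ones.
type cache struct {
	mu        sync.Mutex
	dirs      map[string]string
	listeners map[*int]func(string)
}

func newCache() *cache {
	return &cache{dirs: map[string]string{}, listeners: map[*int]func(string){}}
}

func (c *cache) Store(dir, info string) {
	c.mu.Lock()
	_, old := c.dirs[dir]
	c.dirs[dir] = info
	var ls []func(string)
	for _, l := range c.listeners {
		ls = append(ls, l)
	}
	c.mu.Unlock()
	if !old { // only brand-new entries are announced
		for _, l := range ls {
			l(info)
		}
	}
}

// ScanAndListen registers listener, replays the current contents, and returns
// a stop function that unregisters it.
func (c *cache) ScanAndListen(ctx context.Context, listener func(string)) func() {
	cookie := new(int) // unique key for this listener
	c.mu.Lock()
	var existing []string
	for _, info := range c.dirs {
		existing = append(existing, info)
	}
	c.listeners[cookie] = listener
	c.mu.Unlock()

	for _, info := range existing {
		listener(info)
	}
	return func() {
		c.mu.Lock()
		delete(c.listeners, cookie)
		c.mu.Unlock()
	}
}

func main() {
	c := newCache()
	c.Store("/a", "package a")
	stop := c.ScanAndListen(context.Background(), func(info string) { fmt.Println("saw:", info) })
	c.Store("/b", "package b") // delivered live
	stop()
	c.Store("/c", "package c") // no listener anymore
}
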
diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go
index 7e60eb04e..16252111f 100644
--- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go
+++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go
@@ -415,6 +415,9 @@ var stdlib = map[string][]string{
"crypto/tls": []string{
"Certificate",
"CertificateRequestInfo",
+ "CipherSuite",
+ "CipherSuiteName",
+ "CipherSuites",
"Client",
"ClientAuthType",
"ClientHelloInfo",
@@ -434,6 +437,7 @@ var stdlib = map[string][]string{
"ECDSAWithP521AndSHA512",
"ECDSAWithSHA1",
"Ed25519",
+ "InsecureCipherSuites",
"Listen",
"LoadX509KeyPair",
"NewLRUClientSessionCache",
@@ -465,6 +469,7 @@ var stdlib = map[string][]string{
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+ "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
@@ -473,6 +478,7 @@ var stdlib = map[string][]string{
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
+ "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
"TLS_ECDHE_RSA_WITH_RC4_128_SHA",
"TLS_FALLBACK_SCSV",
"TLS_RSA_WITH_3DES_EDE_CBC_SHA",
@@ -698,36 +704,65 @@ var stdlib = map[string][]string{
"Attr",
"AttrAbstractOrigin",
"AttrAccessibility",
+ "AttrAddrBase",
"AttrAddrClass",
+ "AttrAlignment",
"AttrAllocated",
"AttrArtificial",
"AttrAssociated",
"AttrBaseTypes",
+ "AttrBinaryScale",
"AttrBitOffset",
"AttrBitSize",
"AttrByteSize",
+ "AttrCallAllCalls",
+ "AttrCallAllSourceCalls",
+ "AttrCallAllTailCalls",
"AttrCallColumn",
+ "AttrCallDataLocation",
+ "AttrCallDataValue",
"AttrCallFile",
"AttrCallLine",
+ "AttrCallOrigin",
+ "AttrCallPC",
+ "AttrCallParameter",
+ "AttrCallReturnPC",
+ "AttrCallTailCall",
+ "AttrCallTarget",
+ "AttrCallTargetClobbered",
+ "AttrCallValue",
"AttrCalling",
"AttrCommonRef",
"AttrCompDir",
+ "AttrConstExpr",
"AttrConstValue",
"AttrContainingType",
"AttrCount",
+ "AttrDataBitOffset",
"AttrDataLocation",
"AttrDataMemberLoc",
+ "AttrDecimalScale",
+ "AttrDecimalSign",
"AttrDeclColumn",
"AttrDeclFile",
"AttrDeclLine",
"AttrDeclaration",
"AttrDefaultValue",
+ "AttrDefaulted",
+ "AttrDeleted",
"AttrDescription",
+ "AttrDigitCount",
"AttrDiscr",
"AttrDiscrList",
"AttrDiscrValue",
+ "AttrDwoName",
+ "AttrElemental",
"AttrEncoding",
+ "AttrEndianity",
"AttrEntrypc",
+ "AttrEnumClass",
+ "AttrExplicit",
+ "AttrExportSymbols",
"AttrExtension",
"AttrExternal",
"AttrFrameBase",
@@ -738,27 +773,47 @@ var stdlib = map[string][]string{
"AttrInline",
"AttrIsOptional",
"AttrLanguage",
+ "AttrLinkageName",
"AttrLocation",
+ "AttrLoclistsBase",
"AttrLowerBound",
"AttrLowpc",
"AttrMacroInfo",
+ "AttrMacros",
+ "AttrMainSubprogram",
+ "AttrMutable",
"AttrName",
"AttrNamelistItem",
+ "AttrNoreturn",
+ "AttrObjectPointer",
"AttrOrdering",
+ "AttrPictureString",
"AttrPriority",
"AttrProducer",
"AttrPrototyped",
+ "AttrPure",
"AttrRanges",
+ "AttrRank",
+ "AttrRecursive",
+ "AttrReference",
"AttrReturnAddr",
+ "AttrRnglistsBase",
+ "AttrRvalueReference",
"AttrSegment",
"AttrSibling",
+ "AttrSignature",
+ "AttrSmall",
"AttrSpecification",
"AttrStartScope",
"AttrStaticLink",
"AttrStmtList",
+ "AttrStrOffsetsBase",
"AttrStride",
"AttrStrideSize",
"AttrStringLength",
+ "AttrStringLengthBitSize",
+ "AttrStringLengthByteSize",
+ "AttrThreadsScaled",
"AttrTrampoline",
"AttrType",
"AttrUpperBound",
@@ -772,18 +827,23 @@ var stdlib = map[string][]string{
"BoolType",
"CharType",
"Class",
+ "ClassAddrPtr",
"ClassAddress",
"ClassBlock",
"ClassConstant",
"ClassExprLoc",
"ClassFlag",
"ClassLinePtr",
+ "ClassLocList",
"ClassLocListPtr",
"ClassMacPtr",
"ClassRangeListPtr",
"ClassReference",
"ClassReferenceAlt",
"ClassReferenceSig",
+ "ClassRngList",
+ "ClassRngListsPtr",
+ "ClassStrOffsetsPtr",
"ClassString",
"ClassStringAlt",
"ClassUnknown",
@@ -814,9 +874,13 @@ var stdlib = map[string][]string{
"Tag",
"TagAccessDeclaration",
"TagArrayType",
+ "TagAtomicType",
"TagBaseType",
+ "TagCallSite",
+ "TagCallSiteParameter",
"TagCatchDwarfBlock",
"TagClassType",
+ "TagCoarrayType",
"TagCommonDwarfBlock",
"TagCommonInclusion",
"TagCompileUnit",
@@ -824,12 +888,15 @@ var stdlib = map[string][]string{
"TagConstType",
"TagConstant",
"TagDwarfProcedure",
+ "TagDynamicType",
"TagEntryPoint",
"TagEnumerationType",
"TagEnumerator",
"TagFileType",
"TagFormalParameter",
"TagFriend",
+ "TagGenericSubrange",
+ "TagImmutableType",
"TagImportedDeclaration",
"TagImportedModule",
"TagImportedUnit",
@@ -853,6 +920,7 @@ var stdlib = map[string][]string{
"TagRvalueReferenceType",
"TagSetType",
"TagSharedType",
+ "TagSkeletonUnit",
"TagStringType",
"TagStructType",
"TagSubprogram",
@@ -2359,6 +2427,7 @@ var stdlib = map[string][]string{
"RawValue",
"StructuralError",
"SyntaxError",
+ "TagBMPString",
"TagBitString",
"TagBoolean",
"TagEnum",
@@ -2787,6 +2856,7 @@ var stdlib = map[string][]string{
"IsPredeclared",
"Mode",
"New",
+ "NewFromFiles",
"Note",
"Package",
"PreserveAST",
@@ -3115,6 +3185,11 @@ var stdlib = map[string][]string{
"New64",
"New64a",
},
+ "hash/maphash": []string{
+ "Hash",
+ "MakeSeed",
+ "Seed",
+ },
"html": []string{
"EscapeString",
"UnescapeString",
@@ -3367,6 +3442,7 @@ var stdlib = map[string][]string{
"Ldate",
"Llongfile",
"Lmicroseconds",
+ "Lmsgprefix",
"Logger",
"Lshortfile",
"LstdFlags",
@@ -3443,6 +3519,7 @@ var stdlib = map[string][]string{
"Exp",
"Exp2",
"Expm1",
+ "FMA",
"Float32bits",
"Float32frombits",
"Float64bits",
@@ -3567,6 +3644,9 @@ var stdlib = map[string][]string{
"OnesCount32",
"OnesCount64",
"OnesCount8",
+ "Rem",
+ "Rem32",
+ "Rem64",
"Reverse",
"Reverse16",
"Reverse32",
@@ -5140,7 +5220,10 @@ var stdlib = map[string][]string{
"CTL_NET",
"CTL_QUERY",
"CTRL_BREAK_EVENT",
+ "CTRL_CLOSE_EVENT",
"CTRL_C_EVENT",
+ "CTRL_LOGOFF_EVENT",
+ "CTRL_SHUTDOWN_EVENT",
"CancelIo",
"CancelIoEx",
"CertAddCertificateContextToStore",
@@ -10112,6 +10195,7 @@ var stdlib = map[string][]string{
"Duployan",
"Egyptian_Hieroglyphs",
"Elbasan",
+ "Elymaic",
"Ethiopic",
"Extender",
"FoldCategory",
@@ -10215,6 +10299,7 @@ var stdlib = map[string][]string{
"Myanmar",
"N",
"Nabataean",
+ "Nandinagari",
"Nd",
"New_Tai_Lue",
"Newa",
@@ -10224,6 +10309,7 @@ var stdlib = map[string][]string{
"Noncharacter_Code_Point",
"Number",
"Nushu",
+ "Nyiakeng_Puachue_Hmong",
"Ogham",
"Ol_Chiki",
"Old_Hungarian",
@@ -10331,6 +10417,7 @@ var stdlib = map[string][]string{
"Vai",
"Variation_Selector",
"Version",
+ "Wancho",
"Warang_Citi",
"White_Space",
"Yi",
diff --git a/vendor/golang.org/x/tools/internal/module/module.go b/vendor/golang.org/x/tools/internal/module/module.go
deleted file mode 100644
index 9a4edb9de..000000000
--- a/vendor/golang.org/x/tools/internal/module/module.go
+++ /dev/null
@@ -1,540 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package module defines the module.Version type
-// along with support code.
-package module
-
-// IMPORTANT NOTE
-//
-// This file essentially defines the set of valid import paths for the go command.
-// There are many subtle considerations, including Unicode ambiguity,
-// security, network, and file system representations.
-//
-// This file also defines the set of valid module path and version combinations,
-// another topic with many subtle considerations.
-//
-// Changes to the semantics in this file require approval from rsc.
-
-import (
- "fmt"
- "sort"
- "strings"
- "unicode"
- "unicode/utf8"
-
- "golang.org/x/tools/internal/semver"
-)
-
-// A Version is defined by a module path and version pair.
-type Version struct {
- Path string
-
- // Version is usually a semantic version in canonical form.
- // There are two exceptions to this general rule.
- // First, the top-level target of a build has no specific version
- // and uses Version = "".
- // Second, during MVS calculations the version "none" is used
- // to represent the decision to take no version of a given module.
- Version string `json:",omitempty"`
-}
-
-// Check checks that a given module path, version pair is valid.
-// In addition to the path being a valid module path
-// and the version being a valid semantic version,
-// the two must correspond.
-// For example, the path "yaml/v2" only corresponds to
-// semantic versions beginning with "v2.".
-func Check(path, version string) error {
- if err := CheckPath(path); err != nil {
- return err
- }
- if !semver.IsValid(version) {
- return fmt.Errorf("malformed semantic version %v", version)
- }
- _, pathMajor, _ := SplitPathVersion(path)
- if !MatchPathMajor(version, pathMajor) {
- if pathMajor == "" {
- pathMajor = "v0 or v1"
- }
- if pathMajor[0] == '.' { // .v1
- pathMajor = pathMajor[1:]
- }
- return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor)
- }
- return nil
-}
-
-// firstPathOK reports whether r can appear in the first element of a module path.
-// The first element of the path must be an LDH domain name, at least for now.
-// To avoid case ambiguity, the domain name must be entirely lower case.
-func firstPathOK(r rune) bool {
- return r == '-' || r == '.' ||
- '0' <= r && r <= '9' ||
- 'a' <= r && r <= 'z'
-}
-
-// pathOK reports whether r can appear in an import path element.
-// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
-// This matches what "go get" has historically recognized in import paths.
-// TODO(rsc): We would like to allow Unicode letters, but that requires additional
-// care in the safe encoding (see note below).
-func pathOK(r rune) bool {
- if r < utf8.RuneSelf {
- return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
- '0' <= r && r <= '9' ||
- 'A' <= r && r <= 'Z' ||
- 'a' <= r && r <= 'z'
- }
- return false
-}
-
-// fileNameOK reports whether r can appear in a file name.
-// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
-// If we expand the set of allowed characters here, we have to
-// work harder at detecting potential case-folding and normalization collisions.
-// See note about "safe encoding" below.
-func fileNameOK(r rune) bool {
- if r < utf8.RuneSelf {
- // Entire set of ASCII punctuation, from which we remove characters:
- // ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
- // We disallow some shell special characters: " ' * < > ? ` |
- // (Note that some of those are disallowed by the Windows file system as well.)
- // We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
- // We allow spaces (U+0020) in file names.
- const allowed = "!#$%&()+,-.=@[]^_{}~ "
- if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
- return true
- }
- for i := 0; i < len(allowed); i++ {
- if rune(allowed[i]) == r {
- return true
- }
- }
- return false
- }
- // It may be OK to add more ASCII punctuation here, but only carefully.
- // For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
- return unicode.IsLetter(r)
-}
-
-// CheckPath checks that a module path is valid.
-func CheckPath(path string) error {
- if err := checkPath(path, false); err != nil {
- return fmt.Errorf("malformed module path %q: %v", path, err)
- }
- i := strings.Index(path, "/")
- if i < 0 {
- i = len(path)
- }
- if i == 0 {
- return fmt.Errorf("malformed module path %q: leading slash", path)
- }
- if !strings.Contains(path[:i], ".") {
- return fmt.Errorf("malformed module path %q: missing dot in first path element", path)
- }
- if path[0] == '-' {
- return fmt.Errorf("malformed module path %q: leading dash in first path element", path)
- }
- for _, r := range path[:i] {
- if !firstPathOK(r) {
- return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r)
- }
- }
- if _, _, ok := SplitPathVersion(path); !ok {
- return fmt.Errorf("malformed module path %q: invalid version", path)
- }
- return nil
-}
-
-// CheckImportPath checks that an import path is valid.
-func CheckImportPath(path string) error {
- if err := checkPath(path, false); err != nil {
- return fmt.Errorf("malformed import path %q: %v", path, err)
- }
- return nil
-}
-
-// checkPath checks that a general path is valid.
-// It returns an error describing why but not mentioning path.
-// Because these checks apply to both module paths and import paths,
-// the caller is expected to add the "malformed ___ path %q: " prefix.
-// fileName indicates whether the final element of the path is a file name
-// (as opposed to a directory name).
-func checkPath(path string, fileName bool) error {
- if !utf8.ValidString(path) {
- return fmt.Errorf("invalid UTF-8")
- }
- if path == "" {
- return fmt.Errorf("empty string")
- }
- if strings.Contains(path, "..") {
- return fmt.Errorf("double dot")
- }
- if strings.Contains(path, "//") {
- return fmt.Errorf("double slash")
- }
- if path[len(path)-1] == '/' {
- return fmt.Errorf("trailing slash")
- }
- elemStart := 0
- for i, r := range path {
- if r == '/' {
- if err := checkElem(path[elemStart:i], fileName); err != nil {
- return err
- }
- elemStart = i + 1
- }
- }
- if err := checkElem(path[elemStart:], fileName); err != nil {
- return err
- }
- return nil
-}
-
-// checkElem checks whether an individual path element is valid.
-// fileName indicates whether the element is a file name (not a directory name).
-func checkElem(elem string, fileName bool) error {
- if elem == "" {
- return fmt.Errorf("empty path element")
- }
- if strings.Count(elem, ".") == len(elem) {
- return fmt.Errorf("invalid path element %q", elem)
- }
- if elem[0] == '.' && !fileName {
- return fmt.Errorf("leading dot in path element")
- }
- if elem[len(elem)-1] == '.' {
- return fmt.Errorf("trailing dot in path element")
- }
- charOK := pathOK
- if fileName {
- charOK = fileNameOK
- }
- for _, r := range elem {
- if !charOK(r) {
- return fmt.Errorf("invalid char %q", r)
- }
- }
-
- // Windows disallows a bunch of path elements, sadly.
- // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
- short := elem
- if i := strings.Index(short, "."); i >= 0 {
- short = short[:i]
- }
- for _, bad := range badWindowsNames {
- if strings.EqualFold(bad, short) {
- return fmt.Errorf("disallowed path element %q", elem)
- }
- }
- return nil
-}
-
-// CheckFilePath checks whether a slash-separated file path is valid.
-func CheckFilePath(path string) error {
- if err := checkPath(path, true); err != nil {
- return fmt.Errorf("malformed file path %q: %v", path, err)
- }
- return nil
-}
-
-// badWindowsNames are the reserved file path elements on Windows.
-// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
-var badWindowsNames = []string{
- "CON",
- "PRN",
- "AUX",
- "NUL",
- "COM1",
- "COM2",
- "COM3",
- "COM4",
- "COM5",
- "COM6",
- "COM7",
- "COM8",
- "COM9",
- "LPT1",
- "LPT2",
- "LPT3",
- "LPT4",
- "LPT5",
- "LPT6",
- "LPT7",
- "LPT8",
- "LPT9",
-}
-
-// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
-// and version is either empty or "/vN" for N >= 2.
-// As a special case, gopkg.in paths are recognized directly;
-// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
-func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
- if strings.HasPrefix(path, "gopkg.in/") {
- return splitGopkgIn(path)
- }
-
- i := len(path)
- dot := false
- for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
- if path[i-1] == '.' {
- dot = true
- }
- i--
- }
- if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' {
- return path, "", true
- }
- prefix, pathMajor = path[:i-2], path[i-2:]
- if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
- return path, "", false
- }
- return prefix, pathMajor, true
-}
-
-// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths.
-func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
- if !strings.HasPrefix(path, "gopkg.in/") {
- return path, "", false
- }
- i := len(path)
- if strings.HasSuffix(path, "-unstable") {
- i -= len("-unstable")
- }
- for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') {
- i--
- }
- if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' {
- // All gopkg.in paths must end in vN for some N.
- return path, "", false
- }
- prefix, pathMajor = path[:i-2], path[i-2:]
- if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" {
- return path, "", false
- }
- return prefix, pathMajor, true
-}
-
-// MatchPathMajor reports whether the semantic version v
-// matches the path major version pathMajor.
-func MatchPathMajor(v, pathMajor string) bool {
- if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
- pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
- }
- if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
- // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1.
- // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
- return true
- }
- m := semver.Major(v)
- if pathMajor == "" {
- return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible"
- }
- return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:]
-}
-
-// CanonicalVersion returns the canonical form of the version string v.
-// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
-func CanonicalVersion(v string) string {
- cv := semver.Canonical(v)
- if semver.Build(v) == "+incompatible" {
- cv += "+incompatible"
- }
- return cv
-}
-
-// Sort sorts the list by Path, breaking ties by comparing Versions.
-func Sort(list []Version) {
- sort.Slice(list, func(i, j int) bool {
- mi := list[i]
- mj := list[j]
- if mi.Path != mj.Path {
- return mi.Path < mj.Path
- }
- // To help go.sum formatting, allow version/file.
- // Compare semver prefix by semver rules,
- // file by string order.
- vi := mi.Version
- vj := mj.Version
- var fi, fj string
- if k := strings.Index(vi, "/"); k >= 0 {
- vi, fi = vi[:k], vi[k:]
- }
- if k := strings.Index(vj, "/"); k >= 0 {
- vj, fj = vj[:k], vj[k:]
- }
- if vi != vj {
- return semver.Compare(vi, vj) < 0
- }
- return fi < fj
- })
-}
-
-// Safe encodings
-//
-// Module paths appear as substrings of file system paths
-// (in the download cache) and of web server URLs in the proxy protocol.
-// In general we cannot rely on file systems to be case-sensitive,
-// nor can we rely on web servers, since they read from file systems.
-// That is, we cannot rely on the file system to keep rsc.io/QUOTE
-// and rsc.io/quote separate. Windows and macOS don't.
-// Instead, we must never require two different casings of a file path.
-// Because we want the download cache to match the proxy protocol,
-// and because we want the proxy protocol to be possible to serve
-// from a tree of static files (which might be stored on a case-insensitive
-// file system), the proxy protocol must never require two different casings
-// of a URL path either.
-//
-// One possibility would be to make the safe encoding be the lowercase
-// hexadecimal encoding of the actual path bytes. This would avoid ever
-// needing different casings of a file path, but it would be fairly illegible
-// to most programmers when those paths appeared in the file system
-// (including in file paths in compiler errors and stack traces)
-// in web server logs, and so on. Instead, we want a safe encoding that
-// leaves most paths unaltered.
-//
-// The safe encoding is this:
-// replace every uppercase letter with an exclamation mark
-// followed by the letter's lowercase equivalent.
-//
-// For example,
-// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
-// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
-// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
-//
-// Import paths that avoid upper-case letters are left unchanged.
-// Note that because import paths are ASCII-only and avoid various
-// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
-// and avoids the same problematic punctuation.
-//
-// Import paths have never allowed exclamation marks, so there is no
-// need to define how to encode a literal !.
-//
-// Although paths are disallowed from using Unicode (see pathOK above),
-// the eventual plan is to allow Unicode letters as well, to assume that
-// file systems and URLs are Unicode-safe (storing UTF-8), and apply
-// the !-for-uppercase convention. Note however that not all runes that
-// are different but case-fold equivalent are an upper/lower pair.
-// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
-// are considered to case-fold to each other. When we do add Unicode
-// letters, we must not assume that upper/lower are the only case-equivalent pairs.
-// Perhaps the Kelvin symbol would be disallowed entirely, for example.
-// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
-//
-// Also, it would be nice to allow Unicode marks as well as letters,
-// but marks include combining marks, and then we must deal not
-// only with case folding but also normalization: both U+00E9 ('é')
-// and U+0065 U+0301 ('e' followed by combining acute accent)
-// look the same on the page and are treated by some file systems
-// as the same path. If we do allow Unicode marks in paths, there
-// must be some kind of normalization to allow only one canonical
-// encoding of any character used in an import path.
-
-// EncodePath returns the safe encoding of the given module path.
-// It fails if the module path is invalid.
-func EncodePath(path string) (encoding string, err error) {
- if err := CheckPath(path); err != nil {
- return "", err
- }
-
- return encodeString(path)
-}
-
-// EncodeVersion returns the safe encoding of the given module version.
-// Versions are allowed to be in non-semver form but must be valid file names
-// and not contain exclamation marks.
-func EncodeVersion(v string) (encoding string, err error) {
- if err := checkElem(v, true); err != nil || strings.Contains(v, "!") {
- return "", fmt.Errorf("disallowed version string %q", v)
- }
- return encodeString(v)
-}
-
-func encodeString(s string) (encoding string, err error) {
- haveUpper := false
- for _, r := range s {
- if r == '!' || r >= utf8.RuneSelf {
- // This should be disallowed by CheckPath, but diagnose anyway.
- // The correctness of the encoding loop below depends on it.
- return "", fmt.Errorf("internal error: inconsistency in EncodePath")
- }
- if 'A' <= r && r <= 'Z' {
- haveUpper = true
- }
- }
-
- if !haveUpper {
- return s, nil
- }
-
- var buf []byte
- for _, r := range s {
- if 'A' <= r && r <= 'Z' {
- buf = append(buf, '!', byte(r+'a'-'A'))
- } else {
- buf = append(buf, byte(r))
- }
- }
- return string(buf), nil
-}
-
-// DecodePath returns the module path of the given safe encoding.
-// It fails if the encoding is invalid or encodes an invalid path.
-func DecodePath(encoding string) (path string, err error) {
- path, ok := decodeString(encoding)
- if !ok {
- return "", fmt.Errorf("invalid module path encoding %q", encoding)
- }
- if err := CheckPath(path); err != nil {
- return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err)
- }
- return path, nil
-}
-
-// DecodeVersion returns the version string for the given safe encoding.
-// It fails if the encoding is invalid or encodes an invalid version.
-// Versions are allowed to be in non-semver form but must be valid file names
-// and not contain exclamation marks.
-func DecodeVersion(encoding string) (v string, err error) {
- v, ok := decodeString(encoding)
- if !ok {
- return "", fmt.Errorf("invalid version encoding %q", encoding)
- }
- if err := checkElem(v, true); err != nil {
- return "", fmt.Errorf("disallowed version string %q", v)
- }
- return v, nil
-}
-
-func decodeString(encoding string) (string, bool) {
- var buf []byte
-
- bang := false
- for _, r := range encoding {
- if r >= utf8.RuneSelf {
- return "", false
- }
- if bang {
- bang = false
- if r < 'a' || 'z' < r {
- return "", false
- }
- buf = append(buf, byte(r+'A'-'a'))
- continue
- }
- if r == '!' {
- bang = true
- continue
- }
- if 'A' <= r && r <= 'Z' {
- return "", false
- }
- buf = append(buf, byte(r))
- }
- if bang {
- return "", false
- }
- return string(buf), true
-}
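
The file removed above also documents the !-for-uppercase "safe encoding" used for module paths in the download cache and in proxy URLs. The following is a minimal standalone sketch of that encode/decode rule for readers skimming the diff; it is an illustration only, not the vendored implementation (which now lives in golang.org/x/mod/module), and it omits the validation the real DecodePath performs.

package main

import (
	"fmt"
	"strings"
)

// escape applies the rule from the comment block above: every uppercase
// ASCII letter becomes '!' followed by its lowercase equivalent.
func escape(path string) string {
	var b strings.Builder
	for _, r := range path {
		if 'A' <= r && r <= 'Z' {
			b.WriteByte('!')
			b.WriteRune(r + 'a' - 'A')
		} else {
			b.WriteRune(r)
		}
	}
	return b.String()
}

// unescape reverses escape. Unlike the deleted decodeString, it does not
// reject stray uppercase letters or malformed '!' sequences.
func unescape(enc string) string {
	var b strings.Builder
	bang := false
	for _, r := range enc {
		if bang {
			bang = false
			b.WriteRune(r - 'a' + 'A')
			continue
		}
		if r == '!' {
			bang = true
			continue
		}
		b.WriteRune(r)
	}
	return b.String()
}

func main() {
	p := "github.com/Azure/azure-sdk-for-go"
	e := escape(p)
	fmt.Println(e)           // github.com/!azure/azure-sdk-for-go
	fmt.Println(unescape(e)) // github.com/Azure/azure-sdk-for-go
}
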
diff --git a/vendor/golang.org/x/tools/internal/semver/semver.go b/vendor/golang.org/x/tools/internal/semver/semver.go
deleted file mode 100644
index 4af7118e5..000000000
--- a/vendor/golang.org/x/tools/internal/semver/semver.go
+++ /dev/null
@@ -1,388 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package semver implements comparison of semantic version strings.
-// In this package, semantic version strings must begin with a leading "v",
-// as in "v1.0.0".
-//
-// The general form of a semantic version string accepted by this package is
-//
-// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
-//
-// where square brackets indicate optional parts of the syntax;
-// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
-// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
-// using only alphanumeric characters and hyphens; and
-// all-numeric PRERELEASE identifiers must not have leading zeros.
-//
-// This package follows Semantic Versioning 2.0.0 (see semver.org)
-// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
-// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
-// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
-package semver
-
-// parsed returns the parsed form of a semantic version string.
-type parsed struct {
- major string
- minor string
- patch string
- short string
- prerelease string
- build string
- err string
-}
-
-// IsValid reports whether v is a valid semantic version string.
-func IsValid(v string) bool {
- _, ok := parse(v)
- return ok
-}
-
-// Canonical returns the canonical formatting of the semantic version v.
-// It fills in any missing .MINOR or .PATCH and discards build metadata.
-// Two semantic versions compare equal only if their canonical formattings
-// are identical strings.
-// The canonical invalid semantic version is the empty string.
-func Canonical(v string) string {
- p, ok := parse(v)
- if !ok {
- return ""
- }
- if p.build != "" {
- return v[:len(v)-len(p.build)]
- }
- if p.short != "" {
- return v + p.short
- }
- return v
-}
-
-// Major returns the major version prefix of the semantic version v.
-// For example, Major("v2.1.0") == "v2".
-// If v is an invalid semantic version string, Major returns the empty string.
-func Major(v string) string {
- pv, ok := parse(v)
- if !ok {
- return ""
- }
- return v[:1+len(pv.major)]
-}
-
-// MajorMinor returns the major.minor version prefix of the semantic version v.
-// For example, MajorMinor("v2.1.0") == "v2.1".
-// If v is an invalid semantic version string, MajorMinor returns the empty string.
-func MajorMinor(v string) string {
- pv, ok := parse(v)
- if !ok {
- return ""
- }
- i := 1 + len(pv.major)
- if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
- return v[:j]
- }
- return v[:i] + "." + pv.minor
-}
-
-// Prerelease returns the prerelease suffix of the semantic version v.
-// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
-// If v is an invalid semantic version string, Prerelease returns the empty string.
-func Prerelease(v string) string {
- pv, ok := parse(v)
- if !ok {
- return ""
- }
- return pv.prerelease
-}
-
-// Build returns the build suffix of the semantic version v.
-// For example, Build("v2.1.0+meta") == "+meta".
-// If v is an invalid semantic version string, Build returns the empty string.
-func Build(v string) string {
- pv, ok := parse(v)
- if !ok {
- return ""
- }
- return pv.build
-}
-
-// Compare returns an integer comparing two versions
-// according to semantic version precedence.
-// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
-//
-// An invalid semantic version string is considered less than a valid one.
-// All invalid semantic version strings compare equal to each other.
-func Compare(v, w string) int {
- pv, ok1 := parse(v)
- pw, ok2 := parse(w)
- if !ok1 && !ok2 {
- return 0
- }
- if !ok1 {
- return -1
- }
- if !ok2 {
- return +1
- }
- if c := compareInt(pv.major, pw.major); c != 0 {
- return c
- }
- if c := compareInt(pv.minor, pw.minor); c != 0 {
- return c
- }
- if c := compareInt(pv.patch, pw.patch); c != 0 {
- return c
- }
- return comparePrerelease(pv.prerelease, pw.prerelease)
-}
-
-// Max canonicalizes its arguments and then returns the version string
-// that compares greater.
-func Max(v, w string) string {
- v = Canonical(v)
- w = Canonical(w)
- if Compare(v, w) > 0 {
- return v
- }
- return w
-}
-
-func parse(v string) (p parsed, ok bool) {
- if v == "" || v[0] != 'v' {
- p.err = "missing v prefix"
- return
- }
- p.major, v, ok = parseInt(v[1:])
- if !ok {
- p.err = "bad major version"
- return
- }
- if v == "" {
- p.minor = "0"
- p.patch = "0"
- p.short = ".0.0"
- return
- }
- if v[0] != '.' {
- p.err = "bad minor prefix"
- ok = false
- return
- }
- p.minor, v, ok = parseInt(v[1:])
- if !ok {
- p.err = "bad minor version"
- return
- }
- if v == "" {
- p.patch = "0"
- p.short = ".0"
- return
- }
- if v[0] != '.' {
- p.err = "bad patch prefix"
- ok = false
- return
- }
- p.patch, v, ok = parseInt(v[1:])
- if !ok {
- p.err = "bad patch version"
- return
- }
- if len(v) > 0 && v[0] == '-' {
- p.prerelease, v, ok = parsePrerelease(v)
- if !ok {
- p.err = "bad prerelease"
- return
- }
- }
- if len(v) > 0 && v[0] == '+' {
- p.build, v, ok = parseBuild(v)
- if !ok {
- p.err = "bad build"
- return
- }
- }
- if v != "" {
- p.err = "junk on end"
- ok = false
- return
- }
- ok = true
- return
-}
-
-func parseInt(v string) (t, rest string, ok bool) {
- if v == "" {
- return
- }
- if v[0] < '0' || '9' < v[0] {
- return
- }
- i := 1
- for i < len(v) && '0' <= v[i] && v[i] <= '9' {
- i++
- }
- if v[0] == '0' && i != 1 {
- return
- }
- return v[:i], v[i:], true
-}
-
-func parsePrerelease(v string) (t, rest string, ok bool) {
- // "A pre-release version MAY be denoted by appending a hyphen and
- // a series of dot separated identifiers immediately following the patch version.
- // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
- // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
- if v == "" || v[0] != '-' {
- return
- }
- i := 1
- start := 1
- for i < len(v) && v[i] != '+' {
- if !isIdentChar(v[i]) && v[i] != '.' {
- return
- }
- if v[i] == '.' {
- if start == i || isBadNum(v[start:i]) {
- return
- }
- start = i + 1
- }
- i++
- }
- if start == i || isBadNum(v[start:i]) {
- return
- }
- return v[:i], v[i:], true
-}
-
-func parseBuild(v string) (t, rest string, ok bool) {
- if v == "" || v[0] != '+' {
- return
- }
- i := 1
- start := 1
- for i < len(v) {
- if !isIdentChar(v[i]) {
- return
- }
- if v[i] == '.' {
- if start == i {
- return
- }
- start = i + 1
- }
- i++
- }
- if start == i {
- return
- }
- return v[:i], v[i:], true
-}
-
-func isIdentChar(c byte) bool {
- return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
-}
-
-func isBadNum(v string) bool {
- i := 0
- for i < len(v) && '0' <= v[i] && v[i] <= '9' {
- i++
- }
- return i == len(v) && i > 1 && v[0] == '0'
-}
-
-func isNum(v string) bool {
- i := 0
- for i < len(v) && '0' <= v[i] && v[i] <= '9' {
- i++
- }
- return i == len(v)
-}
-
-func compareInt(x, y string) int {
- if x == y {
- return 0
- }
- if len(x) < len(y) {
- return -1
- }
- if len(x) > len(y) {
- return +1
- }
- if x < y {
- return -1
- } else {
- return +1
- }
-}
-
-func comparePrerelease(x, y string) int {
- // "When major, minor, and patch are equal, a pre-release version has
- // lower precedence than a normal version.
- // Example: 1.0.0-alpha < 1.0.0.
- // Precedence for two pre-release versions with the same major, minor,
- // and patch version MUST be determined by comparing each dot separated
- // identifier from left to right until a difference is found as follows:
- // identifiers consisting of only digits are compared numerically and
- // identifiers with letters or hyphens are compared lexically in ASCII
- // sort order. Numeric identifiers always have lower precedence than
- // non-numeric identifiers. A larger set of pre-release fields has a
- // higher precedence than a smaller set, if all of the preceding
- // identifiers are equal.
- // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
- // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
- if x == y {
- return 0
- }
- if x == "" {
- return +1
- }
- if y == "" {
- return -1
- }
- for x != "" && y != "" {
- x = x[1:] // skip - or .
- y = y[1:] // skip - or .
- var dx, dy string
- dx, x = nextIdent(x)
- dy, y = nextIdent(y)
- if dx != dy {
- ix := isNum(dx)
- iy := isNum(dy)
- if ix != iy {
- if ix {
- return -1
- } else {
- return +1
- }
- }
- if ix {
- if len(dx) < len(dy) {
- return -1
- }
- if len(dx) > len(dy) {
- return +1
- }
- }
- if dx < dy {
- return -1
- } else {
- return +1
- }
- }
- }
- if x == "" {
- return -1
- } else {
- return +1
- }
-}
-
-func nextIdent(x string) (dx, rest string) {
- i := 0
- for i < len(x) && x[i] != '.' {
- i++
- }
- return x[:i], x[i:]
-}
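
With this internal copy of the semver helpers deleted, the same functionality comes from golang.org/x/mod/semver, which the vendor/modules.txt hunk below picks up. A short usage sketch, assuming golang.org/x/mod is available in the build; the function names (IsValid, Canonical, Compare, Major) match the exported set shown above.

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	// Versions must carry the leading "v"; a bare "1.2.3" is invalid.
	fmt.Println(semver.IsValid("1.2.3"))  // false
	fmt.Println(semver.IsValid("v1.2.3")) // true

	// Missing minor/patch parts are filled in; build metadata is dropped.
	fmt.Println(semver.Canonical("v1.2")) // v1.2.0

	// Prerelease versions sort below the corresponding release.
	fmt.Println(semver.Compare("v1.2.3-rc.1", "v1.2.3")) // -1
	fmt.Println(semver.Major("v2.1.0"))                  // v2
}
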
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 9a054dd58..8624296e8 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1080,6 +1080,9 @@ golang.org/x/exp/cmd/apidiff
# golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f
golang.org/x/lint
golang.org/x/lint/golint
+# golang.org/x/mod v0.2.0
+golang.org/x/mod/module
+golang.org/x/mod/semver
# golang.org/x/net v0.0.0-20200301022130-244492dfa37a
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
@@ -1119,7 +1122,7 @@ golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
golang.org/x/time/rate
-# golang.org/x/tools v0.0.0-20200108203644-89082a384178
+# golang.org/x/tools v0.0.0-20200430192856-2840dafb9ee1
golang.org/x/tools/cmd/goimports
golang.org/x/tools/cmd/stringer
golang.org/x/tools/go/analysis
@@ -1134,11 +1137,16 @@ golang.org/x/tools/go/packages
golang.org/x/tools/go/types/objectpath
golang.org/x/tools/go/types/typeutil
golang.org/x/tools/imports
+golang.org/x/tools/internal/analysisinternal
+golang.org/x/tools/internal/event
+golang.org/x/tools/internal/event/core
+golang.org/x/tools/internal/event/keys
+golang.org/x/tools/internal/event/label
golang.org/x/tools/internal/fastwalk
+golang.org/x/tools/internal/gocommand
golang.org/x/tools/internal/gopathwalk
golang.org/x/tools/internal/imports
-golang.org/x/tools/internal/module
-golang.org/x/tools/internal/semver
+golang.org/x/tools/internal/packagesinternal
# golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
golang.org/x/xerrors
golang.org/x/xerrors/internal
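
The modules.txt hunk above records the actual dependency move: golang.org/x/tools/internal/module and internal/semver leave the vendor tree, and golang.org/x/mod/module plus golang.org/x/mod/semver come in alongside the newer x/tools pseudo-version. Code that previously reached into the internal packages can use the public module roughly as follows; this is a sketch, and the exported names used here (module.CheckPath, module.Check) are taken from golang.org/x/mod v0.2.0 and should be checked against the version actually vendored.

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// Valid: dot in the first element, lowercase domain, uppercase is
	// allowed in later elements.
	fmt.Println(module.CheckPath("github.com/Sirupsen/logrus")) // <nil>

	// Invalid: the first path element (the domain) must be lower case.
	fmt.Println(module.CheckPath("GitHub.com/foo/bar")) // error

	// Check validates a path/version pair together, e.g. a gopkg.in .v2
	// path only pairs with v2.x.y versions.
	fmt.Println(module.Check("gopkg.in/yaml.v2", "v2.2.1")) // <nil>
	fmt.Println(module.Check("gopkg.in/yaml.v2", "v3.0.0")) // error
}
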