Diffstat (limited to 'vendor/golang.org/x/tools/go/packages')
-rw-r--r--  vendor/golang.org/x/tools/go/packages/doc.go              269
-rw-r--r--  vendor/golang.org/x/tools/go/packages/external.go          68
-rw-r--r--  vendor/golang.org/x/tools/go/packages/golist.go           337
-rw-r--r--  vendor/golang.org/x/tools/go/packages/golist_fallback.go  282
-rw-r--r--  vendor/golang.org/x/tools/go/packages/packages.go         824
5 files changed, 1780 insertions, 0 deletions
diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go
new file mode 100644
index 00000000..4f5a1a14
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/doc.go
@@ -0,0 +1,269 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package packages loads Go packages for inspection and analysis.
+
+NOTE: THIS PACKAGE IS NOT YET READY FOR WIDESPREAD USE:
+ - The interface is still being revised and is likely to change.
+ - The implementation depends on the Go 1.11 go command.
+ - We intend to finalize the API before Go 1.11 is released.
+
+The Load function takes as input a list of patterns and returns a list of Package
+structs describing individual packages matched by those patterns.
+The LoadMode controls the amount of detail about the loaded packages.
+
+The patterns are used as arguments to the underlying build tool,
+such as the go command or Bazel, and are interpreted according to
+that tool's conventions.
+
+The Package struct provides basic information about the package, including
+
+ - ID, a unique identifier for the package in the returned set;
+ - GoFiles, the names of the package's Go source files;
+ - Imports, a map from source import strings to the Packages they name;
+ - Types, the type information for the package's exported symbols;
+ - Syntax, the parsed syntax trees for the package's source code; and
+ - TypesInfo, the result of a complete type-check of the package syntax trees.
+
+(See the documentation for type Package for the complete list of fields
+and more detailed descriptions.)
+
+For example,
+
+ Load(nil, "bytes", "unicode...")
+
+returns four Package structs describing the standard library packages
+bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
+can match multiple packages and that a package might be matched by
+multiple patterns: in general it is not possible to determine which
+packages correspond to which patterns.
+
+Note that the list returned by Load (LoadAllSyntax in this case)
+only contains the packages matched by the patterns. Their dependencies
+can be found by walking the import graph using the Imports fields.
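
For example, a minimal sketch (not part of this package; it assumes only the ID and Imports fields described here and the golang.org/x/tools/go/packages import) of visiting every package reachable from the loaded roots:

	package example

	import "golang.org/x/tools/go/packages"

	// visitAll calls f once for every package reachable from pkgs
	// by following the Imports edges, depth-first.
	func visitAll(pkgs []*packages.Package, f func(*packages.Package)) {
		seen := make(map[string]bool) // keyed by Package.ID
		var visit func(*packages.Package)
		visit = func(p *packages.Package) {
			if seen[p.ID] {
				return
			}
			seen[p.ID] = true
			f(p)
			for _, imp := range p.Imports {
				visit(imp)
			}
		}
		for _, p := range pkgs {
			visit(p)
		}
	}
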
+
+The Load function can be configured by passing a non-nil Config struct as
+the first argument. If you pass nil for the Config, Load will
+run in LoadAllSyntax mode, collecting the maximal amount of information
+it can.
+See the documentation for type Config for details.
+
+As noted earlier, the Config.Mode controls increasing amounts of detail
+about the loaded packages, with each mode returning all the data of the
+previous mode with some extra added. See the documentation for type LoadMode
+for details.
+
+Most tools should pass their command-line arguments (after any flags)
+uninterpreted to the loader, so that the loader can interpret them
+according to the conventions of the underlying build system.
+For example, this program prints the names of the source files
+for each package listed on the command line:
+
+ package main
+
+ import (
+ "flag"
+ "fmt"
+ "log"
+
+ "golang.org/x/tools/go/packages"
+ )
+
+ func main() {
+ flag.Parse()
+ pkgs, err := packages.Load(nil, flag.Args()...)
+ if err != nil {
+ log.Fatal(err)
+ }
+ for _, pkg := range pkgs {
+ fmt.Print(pkg.ID, pkg.GoFiles)
+ }
+ }
+*/
+package packages // import "golang.org/x/tools/go/packages"
+
+/*
+
+Motivation and design considerations
+
+The new package's design solves problems addressed by two existing
+packages: go/build, which locates and describes packages, and
+golang.org/x/tools/go/loader, which loads, parses and type-checks them.
+The go/build.Package structure encodes too much of the 'go build' way
+of organizing projects, leaving us in need of a data type that describes a
+package of Go source code independent of the underlying build system.
+We wanted something that works equally well with go build and vgo, and
+also other build systems such as Bazel and Blaze, making it possible to
+construct analysis tools that work in all these environments.
+Tools such as errcheck and staticcheck were essentially unavailable to
+the Go community at Google, and some of Google's internal tools for Go
+are unavailable externally.
+This new package provides a uniform way to obtain package metadata by
+querying each of these build systems, optionally supporting their
+preferred command-line notations for packages, so that tools integrate
+neatly with users' build environments. The Metadata query function
+executes an external query tool appropriate to the current workspace.
+
+Loading packages always returns the complete import graph "all the way down",
+even if all you want is information about a single package, because the query
+mechanisms of all the build systems we currently support ({go,vgo} list, and
+blaze/bazel aspect-based query) cannot provide detailed information
+about one package without visiting all its dependencies too, so there is
+no additional asymptotic cost to providing transitive information.
+(This property might not be true of a hypothetical 5th build system.)
+
+This package provides no parse-but-don't-typecheck operation because most tools
+that need only untyped syntax (such as gofmt, goimports, and golint)
+seem not to care about any files other than the ones they are directly
+instructed to look at. Also, it is trivial for a client to supplement
+this functionality on top of a Metadata query.
+
+In calls to TypeCheck, all initial packages, and any package that
+transitively depends on one of them, must be loaded from source.
+Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
+source; D may be loaded from export data, and E may not be loaded at all
+(though it's possible that D's export data mentions it, so a
+types.Package may be created for it and exposed.)
+
+The old loader had a feature to suppress type-checking of function
+bodies on a per-package basis, primarily intended to reduce the work of
+obtaining type information for imported packages. Now that imports are
+satisfied by export data, the optimization no longer seems necessary.
+
+Despite some early attempts, the old loader did not exploit export data,
+instead always using the equivalent of WholeProgram mode. This was due
+to the complexity of mixing source and export data packages (now
+resolved by the upward traversal mentioned above), and because export data
+files were nearly always missing or stale. Now that 'go build' supports
+caching, all the underlying build systems can guarantee to produce
+export data in a reasonable (amortized) time.
+
+Test "main" packages synthesized by the build system are now reported as
+first-class packages, avoiding the need for clients (such as go/ssa) to
+reinvent this generation logic.
+
+One way in which go/packages is simpler than the old loader is in its
+treatment of in-package tests. In-package tests are packages that
+consist of all the files of the library under test, plus the test files.
+The old loader constructed in-package tests by a two-phase process of
+mutation called "augmentation": first it would construct and type check
+all the ordinary library packages and type-check the packages that
+depend on them; then it would add more (test) files to the package and
+type-check again. This two-phase approach had four major problems:
+1) in processing the tests, the loader modified the library package,
+ leaving no way for a client application to see both the test
+ package and the library package; one would mutate into the other.
+2) because test files can declare additional methods on types defined in
+ the library portion of the package, the dispatch of method calls in
+ the library portion was affected by the presence of the test files.
+ This should have been a clue that the packages were logically
+ different.
+3) this model of "augmentation" assumed at most one in-package test
+ per library package, which is true of projects using 'go build',
+ but not other build systems.
+4) because of the two-phase nature of test processing, all packages that
+ import the library package had to be processed before augmentation,
+ forcing a "one-shot" API and preventing the client from calling Load
+  several times in sequence as is now possible in WholeProgram mode.
+ (TypeCheck mode has a similar one-shot restriction for a different reason.)
+
+Early drafts of this package supported "multi-shot" operation
+in the Metadata and WholeProgram modes, although this feature is not exposed
+through the API and will likely be removed.
+Although it allowed clients to make a sequence of calls (or concurrent
+calls) to Load, building up the graph of Packages incrementally,
+it was of marginal value: it complicated the API
+(since it allowed some options to vary across calls but not others),
+it complicated the implementation,
+it cannot be made to work in TypeCheck mode, as explained above,
+and it was less efficient than making one combined call (when this is possible).
+Among the clients we have inspected, none made multiple calls to load
+but could not be easily and satisfactorily modified to make only a single call.
+However, application changes may be required.
+For example, the ssadump command loads the user-specified packages
+and in addition the runtime package. It is tempting to simply append
+"runtime" to the user-provided list, but that does not work if the user
+specified an ad-hoc package such as [a.go b.go].
+Instead, ssadump no longer requests the runtime package,
+but seeks it among the dependencies of the user-specified packages,
+and emits an error if it is not found.
+
+Overlays: the ParseFile hook in the API permits clients to vary the way
+in which ASTs are obtained from filenames; the default implementation is
+based on parser.ParseFile. This feature enables editor-integrated tools
+that analyze the contents of modified but unsaved buffers: rather than
+read from the file system, a tool can read from an archive of modified
+buffers provided by the editor.
+This approach has its limits. Because package metadata is obtained by
+fork/execing an external query command for each build system, we can
+fake only the file contents seen by the parser, type-checker, and
+application, but not by the metadata query, so, for example:
+- additional imports in the fake file will not be described by the
+ metadata, so the type checker will fail to load imports that create
+ new dependencies.
+- in TypeCheck mode, because export data is produced by the query
+ command, it will not reflect the fake file contents.
+- this mechanism cannot add files to a package without first saving them.
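
A hedged sketch of the overlay approach described above (the overlay map, its file path, and the buffer contents are hypothetical; ParseFile is the hook defined on Config in packages.go below):

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
		"log"

		"golang.org/x/tools/go/packages"
	)

	func main() {
		// Hypothetical editor-provided buffers, keyed by absolute filename.
		overlay := map[string][]byte{
			"/home/user/proj/main.go": []byte("package main\n\nfunc main() {}\n"),
		}
		cfg := &packages.Config{
			Mode: packages.LoadAllSyntax,
			ParseFile: func(fset *token.FileSet, filename string) (*ast.File, error) {
				const mode = parser.AllErrors | parser.ParseComments
				if src, ok := overlay[filename]; ok {
					// Parse the unsaved buffer instead of reading the file.
					return parser.ParseFile(fset, filename, src, mode)
				}
				return parser.ParseFile(fset, filename, nil, mode)
			},
		}
		pkgs, err := packages.Load(cfg, "./...")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(len(pkgs), "packages loaded, some files read from the overlay")
	}

As the limits listed above note, only the parser and type checker see the overlay; the metadata query still reads the files on disk.
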
+
+Questions & Tasks
+
+- Add GOARCH/GOOS?
+ They are not portable concepts, but could be made portable.
+ Our goal has been to allow users to express themselves using the conventions
+ of the underlying build system: if the build system honors GOARCH
+ during a build and during a metadata query, then so should
+ applications built atop that query mechanism.
+ Conversely, if the target architecture of the build is determined by
+ command-line flags, the application can pass the relevant
+ flags through to the build system using a command such as:
+ myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
+ However, this approach is low-level, unwieldy, and non-portable.
+ GOOS and GOARCH seem important enough to warrant a dedicated option.
+
+- How should we handle partial failures such as a mixture of good and
+ malformed patterns, existing and non-existent packages, successful and
+ failed builds, import failures, import cycles, and so on, in a call to
+ Load?
+
+- Do we need a GeneratedBy map that maps the name of each generated Go
+ source file in GoFiles to that of the original file, if known, or "" otherwise?
+ Or are //line directives and "Generated" comments in those files enough?
+
+- Support bazel, blaze, and go1.10 list, not just go1.11 list.
+
+- Handle (and test) various partial success cases, e.g.
+ a mixture of good packages and:
+ invalid patterns
+ nonexistent packages
+ empty packages
+ packages with malformed package or import declarations
+ unreadable files
+ import cycles
+ other parse errors
+ type errors
+ Make sure we record errors at the correct place in the graph.
+
+- Missing packages among initial arguments are not reported.
+ Return bogus packages for them, like golist does.
+
+- "undeclared name" errors (for example) are reported out of source file
+ order. I suspect this is due to the breadth-first resolution now used
+ by go/types. Is that a bug? Discuss with gri.
+
+- https://github.com/golang/go/issues/25980 causes these commands to crash:
+ $ GOPATH=/none ./gopackages -all all
+ due to:
+ $ GOPATH=/none go list -e -test -json all
+ and:
+ $ go list -e -test ./relative/path
+
+- Modify stringer to use go/packages, perhaps initially under flag control.
+
+- Bug: "gopackages fmt a.go" doesn't produce an error.
+
+- If necessary, add back an IsTest boolean or expose ForTests on the Package struct.
+ IsTest was removed because we couldn't agree on a useful definition.
+
+*/
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
new file mode 100644
index 00000000..39e5ed99
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file enables an external tool to intercept package requests.
+// If the tool is present then its results are used in preference to
+// the go list command.
+
+package packages
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "strings"
+)
+
+// findExternalDriver returns a driver that invokes an external tool supplying
+// the build system package structure, or nil if no such tool is found.
+// If GOPACKAGESDRIVER is set in the environment, its value is used (a value of
+// "off" disables it); otherwise a binary named gopackagesdriver is sought on the PATH.
+func findExternalDriver(cfg *Config) driver {
+ const toolPrefix = "GOPACKAGESDRIVER="
+ tool := ""
+ for _, env := range cfg.Env {
+ if val := strings.TrimPrefix(env, toolPrefix); val != env {
+ tool = val
+ }
+ }
+ if tool != "" && tool == "off" {
+ return nil
+ }
+ if tool == "" {
+ var err error
+ tool, err = exec.LookPath("gopackagesdriver")
+ if err != nil {
+ return nil
+ }
+ }
+ return func(cfg *Config, words ...string) (*driverResponse, error) {
+ buf := new(bytes.Buffer)
+ fullargs := []string{
+ "list",
+ fmt.Sprintf("-test=%t", cfg.Tests),
+ fmt.Sprintf("-export=%t", usesExportData(cfg)),
+ fmt.Sprintf("-deps=%t", cfg.Mode >= LoadImports),
+ }
+ for _, f := range cfg.Flags {
+ fullargs = append(fullargs, fmt.Sprintf("-flags=%v", f))
+ }
+ fullargs = append(fullargs, "--")
+ fullargs = append(fullargs, words...)
+ cmd := exec.CommandContext(cfg.Context, tool, fullargs...)
+ cmd.Env = cfg.Env
+ cmd.Dir = cfg.Dir
+ cmd.Stdout = buf
+ cmd.Stderr = new(bytes.Buffer)
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
+ }
+ var response driverResponse
+ if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
+ return nil, err
+ }
+ return &response, nil
+ }
+}
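
For orientation only, a hedged sketch of the smallest external driver this hook could exec: a program that ignores the flags and patterns after "--" and replies on stdout with an empty JSON driverResponse (the Roots/Packages shape defined in packages.go below). The file name and behavior are illustrative, not a specification of the driver protocol.

	// A hypothetical, do-nothing gopackagesdriver.
	package main

	import (
		"encoding/json"
		"os"
	)

	// driverResponse mirrors the JSON shape that findExternalDriver decodes:
	// root package IDs plus the flat package list.
	type driverResponse struct {
		Roots    []string          `json:",omitempty"`
		Packages []json.RawMessage `json:",omitempty"`
	}

	func main() {
		// A real driver would parse os.Args (flags, then "--", then patterns)
		// and query its build system before answering; this stub reports nothing.
		if err := json.NewEncoder(os.Stdout).Encode(driverResponse{}); err != nil {
			os.Exit(1)
		}
	}
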
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
new file mode 100644
index 00000000..26d62771
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -0,0 +1,337 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+// A goTooOldError reports that the go command
+// found by exec.LookPath is too old to use the new go list behavior.
+type goTooOldError struct {
+ error
+}
+
+// goListDriver uses the go list command to interpret the patterns and produce
+// the build system package structure.
+// See driver for more details.
+func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
+ // Determine files requested in contains patterns
+ var containFiles []string
+ restPatterns := make([]string, 0, len(patterns))
+ for _, pattern := range patterns {
+ if strings.HasPrefix(pattern, "contains:") {
+ containFile := strings.TrimPrefix(pattern, "contains:")
+ containFiles = append(containFiles, containFile)
+ } else {
+ restPatterns = append(restPatterns, pattern)
+ }
+ }
+ containFiles = absJoin(cfg.Dir, containFiles)
+ patterns = restPatterns
+
+ // TODO(matloob): Remove the definition of listfunc and just use golistPackages once go1.12 is released.
+ var listfunc driver
+ listfunc = func(cfg *Config, words ...string) (*driverResponse, error) {
+		response, err := golistDriverCurrent(cfg, words...)
+ if _, ok := err.(goTooOldError); ok {
+ listfunc = golistDriverFallback
+			return listfunc(cfg, words...)
+ }
+ listfunc = golistDriverCurrent
+ return response, err
+ }
+
+ var response *driverResponse
+ var err error
+
+ // see if we have any patterns to pass through to go list.
+ if len(patterns) > 0 {
+ response, err = listfunc(cfg, patterns...)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ response = &driverResponse{}
+ }
+
+ // Run go list for contains: patterns.
+	seenPkgs := make(map[string]*Package) // for deduplication; different contains: queries could produce the same packages
+ if len(containFiles) > 0 {
+ for _, pkg := range response.Packages {
+ seenPkgs[pkg.ID] = pkg
+ }
+ }
+ for _, f := range containFiles {
+ // TODO(matloob): Do only one query per directory.
+ fdir := filepath.Dir(f)
+ cfg.Dir = fdir
+ dirResponse, err := listfunc(cfg, ".")
+ if err != nil {
+ return nil, err
+ }
+ isRoot := make(map[string]bool, len(dirResponse.Roots))
+ for _, root := range dirResponse.Roots {
+ isRoot[root] = true
+ }
+ for _, pkg := range dirResponse.Packages {
+ // Add any new packages to the main set
+			// We don't bother to filter packages that will be dropped by the changed roots;
+			// that will happen anyway during graph construction outside this function.
+ // Over-reporting packages is not a problem.
+ if _, ok := seenPkgs[pkg.ID]; !ok {
+ // it is a new package, just add it
+ seenPkgs[pkg.ID] = pkg
+ response.Packages = append(response.Packages, pkg)
+ }
+ // if the package was not a root one, it cannot have the file
+ if !isRoot[pkg.ID] {
+ continue
+ }
+ for _, pkgFile := range pkg.GoFiles {
+ if filepath.Base(f) == filepath.Base(pkgFile) {
+ response.Roots = append(response.Roots, pkg.ID)
+ break
+ }
+ }
+ }
+ }
+ return response, nil
+}
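
A sketch of how a client might use the "contains:" query handled above (the file path is hypothetical, and the pattern syntax is simply the one this driver recognizes, which may change before the API is finalized):

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/tools/go/packages"
	)

	func main() {
		// Which package contains this (hypothetical) file?
		cfg := &packages.Config{Mode: packages.LoadFiles}
		pkgs, err := packages.Load(cfg, "contains:/home/user/proj/foo.go")
		if err != nil {
			log.Fatal(err)
		}
		for _, pkg := range pkgs {
			fmt.Println(pkg.ID, pkg.GoFiles)
		}
	}
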
+
+// Fields must match go list;
+// see $GOROOT/src/cmd/go/internal/load/pkg.go.
+type jsonPackage struct {
+ ImportPath string
+ Dir string
+ Name string
+ Export string
+ GoFiles []string
+ CompiledGoFiles []string
+ CFiles []string
+ CgoFiles []string
+ CXXFiles []string
+ MFiles []string
+ HFiles []string
+ FFiles []string
+ SFiles []string
+ SwigFiles []string
+ SwigCXXFiles []string
+ SysoFiles []string
+ Imports []string
+ ImportMap map[string]string
+ Deps []string
+ TestGoFiles []string
+ TestImports []string
+ XTestGoFiles []string
+ XTestImports []string
+ ForTest string // q in a "p [q.test]" package, else ""
+ DepOnly bool
+}
+
+func otherFiles(p *jsonPackage) [][]string {
+ return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
+}
+
+// golistDriverCurrent uses the "go list" command to expand the
+// pattern words and return metadata for the specified packages.
+// dir may be "" and env may be nil, as per os/exec.Command.
+func golistDriverCurrent(cfg *Config, words ...string) (*driverResponse, error) {
+ // go list uses the following identifiers in ImportPath and Imports:
+ //
+ // "p" -- importable package or main (command)
+ // "q.test" -- q's test executable
+ // "p [q.test]" -- variant of p as built for q's test executable
+ // "q_test [q.test]" -- q's external test package
+ //
+ // The packages p that are built differently for a test q.test
+ // are q itself, plus any helpers used by the external test q_test,
+ // typically including "testing" and all its dependencies.
+
+ // Run "go list" for complete
+ // information on the specified packages.
+ buf, err := golist(cfg, golistargs(cfg, words))
+ if err != nil {
+ return nil, err
+ }
+ // Decode the JSON and convert it to Package form.
+ var response driverResponse
+ for dec := json.NewDecoder(buf); dec.More(); {
+ p := new(jsonPackage)
+ if err := dec.Decode(p); err != nil {
+ return nil, fmt.Errorf("JSON decoding failed: %v", err)
+ }
+
+ // Bad package?
+ if p.Name == "" {
+ // This could be due to:
+ // - no such package
+ // - package directory contains no Go source files
+ // - all package declarations are mangled
+ // - and possibly other things.
+ //
+ // For now, we throw it away and let later
+ // stages rediscover the problem, but this
+ // discards the error message computed by go list
+ // and computes a new one---by different logic:
+ // if only one of the package declarations is
+ // bad, for example, should we report an error
+ // in Metadata mode?
+ // Unless we parse and typecheck, we might not
+ // notice there's a problem.
+ //
+ // Perhaps we should save a map of PackageID to
+ // errors for such cases.
+ continue
+ }
+
+ id := p.ImportPath
+
+ // Extract the PkgPath from the package's ID.
+ pkgpath := id
+ if i := strings.IndexByte(id, ' '); i >= 0 {
+ pkgpath = id[:i]
+ }
+
+ if pkgpath == "unsafe" {
+ p.GoFiles = nil // ignore fake unsafe.go file
+ }
+
+ // Assume go list emits only absolute paths for Dir.
+ if !filepath.IsAbs(p.Dir) {
+ log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
+ }
+
+ export := p.Export
+ if export != "" && !filepath.IsAbs(export) {
+ export = filepath.Join(p.Dir, export)
+ }
+
+ // imports
+ //
+ // Imports contains the IDs of all imported packages.
+		// ImportMap records (path, ID) only where they differ.
+ ids := make(map[string]bool)
+ for _, id := range p.Imports {
+ ids[id] = true
+ }
+ imports := make(map[string]*Package)
+ for path, id := range p.ImportMap {
+ imports[path] = &Package{ID: id} // non-identity import
+ delete(ids, id)
+ }
+ for id := range ids {
+ if id == "C" {
+ continue
+ }
+
+ imports[id] = &Package{ID: id} // identity import
+ }
+ if !p.DepOnly {
+ response.Roots = append(response.Roots, id)
+ }
+ pkg := &Package{
+ ID: id,
+ Name: p.Name,
+ PkgPath: pkgpath,
+ GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
+ CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
+ OtherFiles: absJoin(p.Dir, otherFiles(p)...),
+ Imports: imports,
+ ExportFile: export,
+ }
+ // TODO(matloob): Temporary hack since CompiledGoFiles isn't always set.
+ if len(pkg.CompiledGoFiles) == 0 {
+ pkg.CompiledGoFiles = pkg.GoFiles
+ }
+ response.Packages = append(response.Packages, pkg)
+ }
+
+ return &response, nil
+}
+
+// absJoin absolutizes and flattens the lists of files.
+func absJoin(dir string, fileses ...[]string) (res []string) {
+ for _, files := range fileses {
+ for _, file := range files {
+ if !filepath.IsAbs(file) {
+ file = filepath.Join(dir, file)
+ }
+ res = append(res, file)
+ }
+ }
+ return res
+}
+
+func golistargs(cfg *Config, words []string) []string {
+ fullargs := []string{
+ "list", "-e", "-json", "-compiled",
+ fmt.Sprintf("-test=%t", cfg.Tests),
+ fmt.Sprintf("-export=%t", usesExportData(cfg)),
+ fmt.Sprintf("-deps=%t", cfg.Mode >= LoadImports),
+ }
+ fullargs = append(fullargs, cfg.Flags...)
+ fullargs = append(fullargs, "--")
+ fullargs = append(fullargs, words...)
+ return fullargs
+}
+
+// golist returns the JSON-encoded result of a "go list args..." query.
+func golist(cfg *Config, args []string) (*bytes.Buffer, error) {
+ out := new(bytes.Buffer)
+ cmd := exec.CommandContext(cfg.Context, "go", args...)
+ cmd.Env = cfg.Env
+ cmd.Dir = cfg.Dir
+ cmd.Stdout = out
+ cmd.Stderr = new(bytes.Buffer)
+ if err := cmd.Run(); err != nil {
+ exitErr, ok := err.(*exec.ExitError)
+ if !ok {
+ // Catastrophic error:
+ // - executable not found
+ // - context cancellation
+ return nil, fmt.Errorf("couldn't exec 'go list': %s %T", err, err)
+ }
+
+ // Old go list?
+ if strings.Contains(fmt.Sprint(cmd.Stderr), "flag provided but not defined") {
+ return nil, goTooOldError{fmt.Errorf("unsupported version of go list: %s: %s", exitErr, cmd.Stderr)}
+ }
+
+ // Export mode entails a build.
+ // If that build fails, errors appear on stderr
+ // (despite the -e flag) and the Export field is blank.
+ // Do not fail in that case.
+ if !usesExportData(cfg) {
+ return nil, fmt.Errorf("go list: %s: %s", exitErr, cmd.Stderr)
+ }
+ }
+
+ // Print standard error output from "go list".
+ // Due to the -e flag, this should be empty.
+ // However, in -export mode it contains build errors.
+ // Should go list save build errors in the Package.Error JSON field?
+ // See https://github.com/golang/go/issues/26319.
+ // If so, then we should continue to print stderr as go list
+ // will be silent unless something unexpected happened.
+ // If not, perhaps we should suppress it to reduce noise.
+ if stderr := fmt.Sprint(cmd.Stderr); stderr != "" {
+ fmt.Fprintf(os.Stderr, "go list stderr <<%s>>\n", stderr)
+ }
+
+ // debugging
+ if false {
+ fmt.Fprintln(os.Stderr, out)
+ }
+
+ return out, nil
+}
diff --git a/vendor/golang.org/x/tools/go/packages/golist_fallback.go b/vendor/golang.org/x/tools/go/packages/golist_fallback.go
new file mode 100644
index 00000000..331bb655
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/golist_fallback.go
@@ -0,0 +1,282 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "go/build"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/go/internal/cgo"
+)
+
+// TODO(matloob): Delete this file once Go 1.12 is released.
+
+// This file provides backwards-compatibility support for loading packages
+// with versions of Go earlier than 1.10.4. This support is meant to
+// assist with migration to the Package API until there's
+// widespread adoption of these newer Go versions.
+// This support will be removed once Go 1.12 is released
+// in Q1 2019.
+
+func golistDriverFallback(cfg *Config, words ...string) (*driverResponse, error) {
+ original, deps, err := getDeps(cfg, words...)
+ if err != nil {
+ return nil, err
+ }
+
+ var tmpdir string // used for generated cgo files
+
+ var response driverResponse
+ addPackage := func(p *jsonPackage) {
+ if p.Name == "" {
+ return
+ }
+
+ id := p.ImportPath
+ isRoot := original[id] != nil
+ pkgpath := id
+
+ if pkgpath == "unsafe" {
+ p.GoFiles = nil // ignore fake unsafe.go file
+ }
+
+ importMap := func(importlist []string) map[string]*Package {
+ importMap := make(map[string]*Package)
+ for _, id := range importlist {
+
+ if id == "C" {
+ for _, path := range []string{"unsafe", "syscall", "runtime/cgo"} {
+ if pkgpath != path && importMap[path] == nil {
+ importMap[path] = &Package{ID: path}
+ }
+ }
+ continue
+ }
+ importMap[vendorlessPath(id)] = &Package{ID: id}
+ }
+ return importMap
+ }
+ compiledGoFiles := absJoin(p.Dir, p.GoFiles)
+ // Use a function to simplify control flow. It's just a bunch of gotos.
+ var cgoErrors []error
+ processCgo := func() bool {
+ // Suppress any cgo errors. Any relevant errors will show up in typechecking.
+ // TODO(matloob): Skip running cgo if Mode < LoadTypes.
+ if tmpdir == "" {
+ if tmpdir, err = ioutil.TempDir("", "gopackages"); err != nil {
+ cgoErrors = append(cgoErrors, err)
+ return false
+ }
+ }
+ outdir := filepath.Join(tmpdir, strings.Replace(p.ImportPath, "/", "_", -1))
+ if err := os.Mkdir(outdir, 0755); err != nil {
+ cgoErrors = append(cgoErrors, err)
+ return false
+ }
+ files, _, err := runCgo(p.Dir, outdir, cfg.Env)
+ if err != nil {
+ cgoErrors = append(cgoErrors, err)
+ return false
+ }
+ compiledGoFiles = append(compiledGoFiles, files...)
+ return true
+ }
+ if len(p.CgoFiles) == 0 || !processCgo() {
+ compiledGoFiles = append(compiledGoFiles, absJoin(p.Dir, p.CgoFiles)...) // Punt to typechecker.
+ }
+ if isRoot {
+ response.Roots = append(response.Roots, id)
+ }
+ response.Packages = append(response.Packages, &Package{
+ ID: id,
+ Name: p.Name,
+ GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
+ CompiledGoFiles: compiledGoFiles,
+ OtherFiles: absJoin(p.Dir, otherFiles(p)...),
+ PkgPath: pkgpath,
+ Imports: importMap(p.Imports),
+ // TODO(matloob): set errors on the Package to cgoErrors
+ })
+ if cfg.Tests {
+ testID := fmt.Sprintf("%s [%s.test]", id, id)
+ if len(p.TestGoFiles) > 0 || len(p.XTestGoFiles) > 0 {
+ if isRoot {
+ response.Roots = append(response.Roots, testID)
+ }
+ response.Packages = append(response.Packages, &Package{
+ ID: testID,
+ Name: p.Name,
+ GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles, p.TestGoFiles),
+ CompiledGoFiles: append(compiledGoFiles, absJoin(p.Dir, p.TestGoFiles)...),
+ OtherFiles: absJoin(p.Dir, otherFiles(p)...),
+ PkgPath: pkgpath,
+ Imports: importMap(append(p.Imports, p.TestImports...)),
+ // TODO(matloob): set errors on the Package to cgoErrors
+ })
+ if len(p.XTestGoFiles) > 0 {
+ xtestID := fmt.Sprintf("%s_test [%s.test]", id, id)
+ if isRoot {
+ response.Roots = append(response.Roots, xtestID)
+ }
+ for i, imp := range p.XTestImports {
+ if imp == p.ImportPath {
+ p.XTestImports[i] = testID
+ break
+ }
+ }
+ response.Packages = append(response.Packages, &Package{
+ ID: xtestID,
+ Name: p.Name + "_test",
+ GoFiles: absJoin(p.Dir, p.XTestGoFiles),
+ CompiledGoFiles: absJoin(p.Dir, p.XTestGoFiles),
+ PkgPath: pkgpath,
+ Imports: importMap(p.XTestImports),
+ })
+ }
+ }
+ }
+ }
+
+ for _, pkg := range original {
+ addPackage(pkg)
+ }
+ if cfg.Mode < LoadImports || len(deps) == 0 {
+ return &response, nil
+ }
+
+ buf, err := golist(cfg, golistArgsFallback(cfg, deps))
+ if err != nil {
+ return nil, err
+ }
+
+ // Decode the JSON and convert it to Package form.
+ for dec := json.NewDecoder(buf); dec.More(); {
+ p := new(jsonPackage)
+ if err := dec.Decode(p); err != nil {
+ return nil, fmt.Errorf("JSON decoding failed: %v", err)
+ }
+
+ addPackage(p)
+ }
+
+ return &response, nil
+}
+
+// vendorlessPath returns the devendorized version of the import path ipath.
+// For example, vendorlessPath("foo/bar/vendor/a/b") returns "a/b".
+// Copied from golang.org/x/tools/imports/fix.go.
+func vendorlessPath(ipath string) string {
+ // Devendorize for use in import statement.
+ if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 {
+ return ipath[i+len("/vendor/"):]
+ }
+ if strings.HasPrefix(ipath, "vendor/") {
+ return ipath[len("vendor/"):]
+ }
+ return ipath
+}
+
+// getDeps runs an initial go list to determine all the dependency packages.
+func getDeps(cfg *Config, words ...string) (originalSet map[string]*jsonPackage, deps []string, err error) {
+ buf, err := golist(cfg, golistArgsFallback(cfg, words))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ depsSet := make(map[string]bool)
+ originalSet = make(map[string]*jsonPackage)
+ var testImports []string
+
+ // Extract deps from the JSON.
+ for dec := json.NewDecoder(buf); dec.More(); {
+ p := new(jsonPackage)
+ if err := dec.Decode(p); err != nil {
+ return nil, nil, fmt.Errorf("JSON decoding failed: %v", err)
+ }
+
+ originalSet[p.ImportPath] = p
+ for _, dep := range p.Deps {
+ depsSet[dep] = true
+ }
+ if cfg.Tests {
+ // collect the additional imports of the test packages.
+ pkgTestImports := append(p.TestImports, p.XTestImports...)
+ for _, imp := range pkgTestImports {
+ if depsSet[imp] {
+ continue
+ }
+ depsSet[imp] = true
+ testImports = append(testImports, imp)
+ }
+ }
+ }
+ // Get the deps of the packages imported by tests.
+ if len(testImports) > 0 {
+ buf, err = golist(cfg, golistArgsFallback(cfg, testImports))
+ if err != nil {
+ return nil, nil, err
+ }
+ // Extract deps from the JSON.
+ for dec := json.NewDecoder(buf); dec.More(); {
+ p := new(jsonPackage)
+ if err := dec.Decode(p); err != nil {
+ return nil, nil, fmt.Errorf("JSON decoding failed: %v", err)
+ }
+ for _, dep := range p.Deps {
+ depsSet[dep] = true
+ }
+ }
+ }
+
+ for orig := range originalSet {
+ delete(depsSet, orig)
+ }
+
+ deps = make([]string, 0, len(depsSet))
+ for dep := range depsSet {
+ deps = append(deps, dep)
+ }
+ sort.Strings(deps) // ensure output is deterministic
+ return originalSet, deps, nil
+}
+
+func golistArgsFallback(cfg *Config, words []string) []string {
+ fullargs := []string{"list", "-e", "-json"}
+ fullargs = append(fullargs, cfg.Flags...)
+ fullargs = append(fullargs, "--")
+ fullargs = append(fullargs, words...)
+ return fullargs
+}
+
+func runCgo(pkgdir, tmpdir string, env []string) (files, displayfiles []string, err error) {
+ // Use go/build to open cgo files and determine the cgo flags, etc, from them.
+ // This is tricky so it's best to avoid reimplementing as much as we can, and
+	// we plan to delete this support once Go 1.12 is released anyway.
+ // TODO(matloob): This isn't completely correct because we're using the Default
+ // context. Perhaps we should more accurately fill in the context.
+ bp, err := build.ImportDir(pkgdir, build.ImportMode(0))
+ if err != nil {
+ return nil, nil, err
+ }
+ for _, ev := range env {
+		if v := strings.TrimPrefix(ev, "CGO_CPPFLAGS="); v != ev {
+			bp.CgoCPPFLAGS = append(bp.CgoCPPFLAGS, strings.Fields(v)...)
+		} else if v := strings.TrimPrefix(ev, "CGO_CFLAGS="); v != ev {
+			bp.CgoCFLAGS = append(bp.CgoCFLAGS, strings.Fields(v)...)
+		} else if v := strings.TrimPrefix(ev, "CGO_CXXFLAGS="); v != ev {
+			bp.CgoCXXFLAGS = append(bp.CgoCXXFLAGS, strings.Fields(v)...)
+		} else if v := strings.TrimPrefix(ev, "CGO_LDFLAGS="); v != ev {
+			bp.CgoLDFLAGS = append(bp.CgoLDFLAGS, strings.Fields(v)...)
+ }
+ }
+ return cgo.Run(bp, pkgdir, tmpdir, true)
+}
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
new file mode 100644
index 00000000..7e8e4e2e
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -0,0 +1,824 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+// See doc.go for package documentation and implementation notes.
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "log"
+ "os"
+ "sync"
+
+ "golang.org/x/tools/go/gcexportdata"
+)
+
+// A LoadMode specifies the amount of detail to return when loading packages.
+// The modes are all strictly additive: each mode in the list below provides
+// more information than the previous one, but may also increase the cost
+// of collecting that information.
+// Load is always allowed to return more information than requested.
+type LoadMode int
+
+const (
+ // LoadFiles finds the packages and computes their source file lists.
+ // Package fields: ID, Name, Errors, GoFiles, OtherFiles.
+ LoadFiles LoadMode = iota
+
+ // LoadImports adds import information for each package
+ // and its dependencies.
+ // Package fields added: Imports.
+ LoadImports
+
+ // LoadTypes adds type information for the package's exported symbols.
+ // Package fields added: Types, Fset, IllTyped.
+ // This will use the type information provided by the build system if
+ // possible, and the ExportFile field may be filled in.
+ LoadTypes
+
+ // LoadSyntax adds typed syntax trees for the packages matching the patterns.
+ // Package fields added: Syntax, TypesInfo, for direct pattern matches only.
+ LoadSyntax
+
+ // LoadAllSyntax adds typed syntax trees for the packages matching the patterns
+ // and all dependencies.
+ // Package fields added: Syntax, TypesInfo, for all packages in import graph.
+ LoadAllSyntax
+)
+
+// A Config specifies details about how packages should be loaded.
+// Calls to Load do not modify this struct.
+type Config struct {
+ // Mode controls the level of information returned for each package.
+ Mode LoadMode
+
+ // Context specifies the context for the load operation.
+ // If the context is cancelled, the loader may stop early
+ // and return an ErrCancelled error.
+ // If Context is nil, the load cannot be cancelled.
+ Context context.Context
+
+ // Dir is the directory in which to run the build system tool
+ // that provides information about the packages.
+ // If Dir is empty, the tool is run in the current directory.
+ Dir string
+
+ // Env is the environment to use when invoking the build system tool.
+ // If Env is nil, the current environment is used.
+ // Like in os/exec's Cmd, only the last value in the slice for
+ // each environment key is used. To specify the setting of only
+ // a few variables, append to the current environment, as in:
+ //
+ // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386")
+ //
+ Env []string
+
+ // Flags is a list of command-line flags to be passed through to
+ // the underlying query tool.
+ Flags []string
+
+ // Error is called for each error encountered during package loading.
+ // It must be safe to call Error simultaneously from multiple goroutines.
+ // In addition to calling Error, the loader will record each error
+ // in the corresponding Package's Errors list.
+ // If Error is nil, the loader will print errors to os.Stderr.
+ // To disable printing of errors, set opt.Error = func(error){}.
+ // TODO(rsc): What happens in the Metadata loader? Currently nothing.
+ Error func(error)
+
+ // Fset is the token.FileSet to use when parsing source files or
+ // type information provided by the build system.
+ // If Fset is nil, the loader will create one.
+ Fset *token.FileSet
+
+ // ParseFile is called to read and parse each file
+ // when preparing a package's type-checked syntax tree.
+ // It must be safe to call ParseFile simultaneously from multiple goroutines.
+	// If ParseFile is nil, the loader will use parser.ParseFile.
+ //
+ // Setting ParseFile to a custom implementation can allow
+ // providing alternate file content in order to type-check
+ // unsaved text editor buffers, or to selectively eliminate
+ // unwanted function bodies to reduce the amount of work
+ // done by the type checker.
+ ParseFile func(fset *token.FileSet, filename string) (*ast.File, error)
+
+ // If Tests is set, the loader includes not just the packages
+ // matching a particular pattern but also any related test packages,
+ // including test-only variants of the package and the test executable.
+ //
+ // For example, when using the go command, loading "fmt" with Tests=true
+ // returns four packages, with IDs "fmt" (the standard package),
+ // "fmt [fmt.test]" (the package as compiled for the test),
+ // "fmt_test" (the test functions from source files in package fmt_test),
+ // and "fmt.test" (the test binary).
+ //
+ // In build systems with explicit names for tests,
+ // setting Tests may have no effect.
+ Tests bool
+
+ // TypeChecker provides additional configuration for type-checking syntax trees.
+ //
+ // It is used for all packages in LoadAllSyntax mode,
+ // and for the packages matching the patterns, but not their dependencies,
+ // in LoadSyntax mode.
+ //
+ // The TypeChecker.Error function is ignored:
+ // errors are reported using the Error function defined above.
+ //
+ // The TypeChecker.Importer function is ignored:
+ // the loader defines an appropriate importer.
+ //
+ // TODO(rsc): TypeChecker.Sizes should use the same sizes as the main build.
+ // Derive them from the runtime?
+ TypeChecker types.Config
+}
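
A hedged example of filling in this struct (the build tag and GOOS/GOARCH values are arbitrary illustrations, not recommendations):

	package main

	import (
		"fmt"
		"log"
		"os"

		"golang.org/x/tools/go/packages"
	)

	func main() {
		cfg := &packages.Config{
			Mode:  packages.LoadSyntax,
			Tests: true, // also load test variants such as "fmt [fmt.test]"
			Env:   append(os.Environ(), "GOOS=linux", "GOARCH=amd64"),
			Flags: []string{"-tags=integration"}, // passed through to the query tool
		}
		pkgs, err := packages.Load(cfg, "./...")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(len(pkgs), "packages loaded (including test variants)")
	}
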
+
+// driver is the type for functions that query the build system for the
+// packages named by the patterns.
+type driver func(cfg *Config, patterns ...string) (*driverResponse, error)
+
+// driverResponse contains the results for a driver query.
+type driverResponse struct {
+ // Roots is the set of package IDs that make up the root packages.
+ // We have to encode this separately because when we encode a single package
+ // we cannot know if it is one of the roots as that requires knowledge of the
+ // graph it is part of.
+ Roots []string `json:",omitempty"`
+
+ // Packages is the full set of packages in the graph.
+ // The packages are not connected into a graph.
+	// The Imports, if populated, will be stubs that only have their ID set.
+ // Imports will be connected and then type and syntax information added in a
+ // later pass (see refine).
+ Packages []*Package
+}
+
+// Load loads and returns the Go packages named by the given patterns.
+//
+// Config specifies loading options;
+// nil behaves the same as an empty Config.
+//
+// Load returns an error if any of the patterns was invalid
+// as defined by the underlying build system.
+// It may return an empty list of packages without an error,
+// for instance for an empty expansion of a valid wildcard.
+func Load(cfg *Config, patterns ...string) ([]*Package, error) {
+ l := newLoader(cfg)
+ response, err := defaultDriver(&l.Config, patterns...)
+ if err != nil {
+ return nil, err
+ }
+ return l.refine(response.Roots, response.Packages...)
+}
+
+// defaultDriver is a driver that looks for an external driver binary and, if
+// it does not find one, falls back to the built-in go list driver.
+func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
+ driver := findExternalDriver(cfg)
+ if driver == nil {
+ driver = goListDriver
+ }
+ return driver(cfg, patterns...)
+}
+
+// A Package describes a single loaded Go package.
+type Package struct {
+ // ID is a unique identifier for a package,
+ // in a syntax provided by the underlying build system.
+ //
+ // Because the syntax varies based on the build system,
+ // clients should treat IDs as opaque and not attempt to
+ // interpret them.
+ ID string
+
+ // Name is the package name as it appears in the package source code.
+ Name string
+
+	// PkgPath is the package path as used by the go/types package.
+ // This is used to map entries in the type information back to the package
+ // they come from.
+ PkgPath string
+
+ // Errors lists any errors encountered while loading the package.
+ // TODO(rsc): Say something about the errors or at least their Strings,
+ // as far as file:line being at the beginning and so on.
+ Errors []error
+
+ // GoFiles lists the absolute file paths of the package's Go source files.
+ GoFiles []string
+
+ // CompiledGoFiles lists the absolute file paths of the package's source
+ // files that were presented to the compiler.
+ // This may differ from GoFiles if files are processed before compilation.
+ CompiledGoFiles []string
+
+ // OtherFiles lists the absolute file paths of the package's non-Go source files,
+ // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on.
+ OtherFiles []string
+
+ // ExportFile is the absolute path to a file containing the type information
+ // provided by the build system.
+ ExportFile string
+
+ // Imports maps import paths appearing in the package's Go source files
+ // to corresponding loaded Packages.
+ Imports map[string]*Package
+
+ // Types is the type information for the package.
+ // Modes LoadTypes and above set this field for all packages.
+ //
+ // TODO(adonovan): all packages? In Types mode this entails
+ // asymptotically more export data processing than is required
+ // to load the requested packages. Is that what we want?
+ Types *types.Package
+
+ // Fset provides position information for Types, TypesInfo, and Syntax.
+ // Modes LoadTypes and above set this field for all packages.
+ Fset *token.FileSet
+
+ // IllTyped indicates whether the package has any type errors.
+ // Modes LoadTypes and above set this field for all packages.
+ IllTyped bool
+
+ // Syntax is the package's syntax trees, for the files listed in GoFiles.
+ //
+	// Mode LoadSyntax sets this field for packages matching the patterns.
+	// Mode LoadAllSyntax sets this field for all packages, including dependencies.
+ Syntax []*ast.File
+
+ // TypesInfo is the type-checking results for the package's syntax trees.
+ // It is set only when Syntax is set.
+ TypesInfo *types.Info
+}
+
+// packageError is used to serialize structured errors as much as possible.
+// This has members compatible with the golist error type, and possibly some
+// more if we need other error information to survive.
+type packageError struct {
+ Pos string // position of error
+ Err string // the error itself
+}
+
+func (e *packageError) Error() string {
+ return e.Pos + ": " + e.Err
+}
+
+// flatPackage is the JSON form of Package.
+// It drops all the type and syntax fields, and transforms the Imports and Errors into serializable forms.
+type flatPackage struct {
+ ID string
+ Name string `json:",omitempty"`
+ PkgPath string `json:",omitempty"`
+ Errors []*packageError `json:",omitempty"`
+ GoFiles []string `json:",omitempty"`
+ CompiledGoFiles []string `json:",omitempty"`
+ OtherFiles []string `json:",omitempty"`
+ ExportFile string `json:",omitempty"`
+ Imports map[string]string `json:",omitempty"`
+}
+
+// MarshalJSON returns the Package in its JSON form.
+// For the most part, the structure fields are written out unmodified, and
+// the type and syntax fields are skipped.
+// The imports are written out as just a map of path to package id.
+// The errors are written using a custom type that tries to preserve the
+// structure of error types we know about.
+// This method exists to enable support for additional build systems. It is
+// not intended for use by clients of the API and we may change the format.
+func (p *Package) MarshalJSON() ([]byte, error) {
+ flat := &flatPackage{
+ ID: p.ID,
+ Name: p.Name,
+ PkgPath: p.PkgPath,
+ GoFiles: p.GoFiles,
+ CompiledGoFiles: p.CompiledGoFiles,
+ OtherFiles: p.OtherFiles,
+ ExportFile: p.ExportFile,
+ }
+ if len(p.Errors) > 0 {
+ flat.Errors = make([]*packageError, len(p.Errors))
+ for i, err := range p.Errors {
+ //TODO: best effort mapping of errors to the serialized form
+ switch err := err.(type) {
+ case *packageError:
+ flat.Errors[i] = err
+ default:
+ flat.Errors[i] = &packageError{Err: err.Error()}
+ }
+ }
+ }
+ if len(p.Imports) > 0 {
+ flat.Imports = make(map[string]string, len(p.Imports))
+ for path, ipkg := range p.Imports {
+ flat.Imports[path] = ipkg.ID
+ }
+ }
+ return json.Marshal(flat)
+}
+
+// UnmarshalJSON reads in a Package from its JSON format.
+// See MarshalJSON for details about the format accepted.
+func (p *Package) UnmarshalJSON(b []byte) error {
+ flat := &flatPackage{}
+ if err := json.Unmarshal(b, &flat); err != nil {
+ return err
+ }
+ *p = Package{
+ ID: flat.ID,
+ Name: flat.Name,
+ PkgPath: flat.PkgPath,
+ GoFiles: flat.GoFiles,
+ CompiledGoFiles: flat.CompiledGoFiles,
+ OtherFiles: flat.OtherFiles,
+ ExportFile: flat.ExportFile,
+ }
+ if len(flat.Errors) > 0 {
+ p.Errors = make([]error, len(flat.Errors))
+ for i, err := range flat.Errors {
+ p.Errors[i] = err
+ }
+ }
+ if len(flat.Imports) > 0 {
+ p.Imports = make(map[string]*Package, len(flat.Imports))
+ for path, id := range flat.Imports {
+ p.Imports[path] = &Package{ID: id}
+ }
+ }
+ return nil
+}
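
A sketch of the round trip these methods allow (the package ID and file path are made up; note how Imports collapse to ID-only stubs, exactly as the flatPackage form describes):

	package main

	import (
		"encoding/json"
		"fmt"
		"log"

		"golang.org/x/tools/go/packages"
	)

	func main() {
		p := &packages.Package{
			ID:      "example.com/m", // made-up package
			Name:    "m",
			PkgPath: "example.com/m",
			GoFiles: []string{"/src/example.com/m/m.go"},
			Imports: map[string]*packages.Package{"fmt": {ID: "fmt"}},
		}
		data, err := json.Marshal(p) // MarshalJSON flattens Imports to IDs
		if err != nil {
			log.Fatal(err)
		}
		var q packages.Package
		if err := json.Unmarshal(data, &q); err != nil { // UnmarshalJSON restores ID-only stubs
			log.Fatal(err)
		}
		fmt.Println(q.Imports["fmt"].ID) // prints "fmt"
	}
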
+
+func (p *Package) String() string { return p.ID }
+
+// loaderPackage augments Package with state used during the loading phase
+type loaderPackage struct {
+ *Package
+ importErrors map[string]error // maps each bad import to its error
+ loadOnce sync.Once
+ color uint8 // for cycle detection
+ mark, needsrc bool // used when Mode >= LoadTypes
+}
+
+// loader holds the working state of a single call to load.
+type loader struct {
+ pkgs map[string]*loaderPackage
+ Config
+ exportMu sync.Mutex // enforces mutual exclusion of exportdata operations
+}
+
+func newLoader(cfg *Config) *loader {
+ ld := &loader{}
+ if cfg != nil {
+ ld.Config = *cfg
+ }
+ if ld.Context == nil {
+ ld.Context = context.Background()
+ }
+ if ld.Dir == "" {
+ if dir, err := os.Getwd(); err == nil {
+ ld.Dir = dir
+ }
+ }
+
+ if ld.Mode >= LoadTypes {
+ if ld.Fset == nil {
+ ld.Fset = token.NewFileSet()
+ }
+
+ // Error and ParseFile are required even in LoadTypes mode
+ // because we load source if export data is missing.
+
+ if ld.Error == nil {
+ ld.Error = func(e error) {
+ fmt.Fprintln(os.Stderr, e)
+ }
+ }
+
+ if ld.ParseFile == nil {
+ ld.ParseFile = func(fset *token.FileSet, filename string) (*ast.File, error) {
+ const mode = parser.AllErrors | parser.ParseComments
+ return parser.ParseFile(fset, filename, nil, mode)
+ }
+ }
+ }
+ return ld
+}
+
+// refine connects the supplied packages into a graph and then adds type
+// and syntax information as requested by the LoadMode.
+func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
+ if len(list) == 0 {
+ return nil, fmt.Errorf("packages not found")
+ }
+ isRoot := make(map[string]bool, len(roots))
+ for _, root := range roots {
+ isRoot[root] = true
+ }
+ ld.pkgs = make(map[string]*loaderPackage)
+ // first pass, fixup and build the map and roots
+ var initial []*loaderPackage
+ for _, pkg := range list {
+ lpkg := &loaderPackage{
+ Package: pkg,
+ needsrc: ld.Mode >= LoadAllSyntax ||
+ (ld.Mode >= LoadSyntax && isRoot[pkg.ID]) ||
+ (pkg.ExportFile == "" && pkg.PkgPath != "unsafe"),
+ }
+ ld.pkgs[lpkg.ID] = lpkg
+ if isRoot[lpkg.ID] {
+ initial = append(initial, lpkg)
+ }
+ }
+
+ // Materialize the import graph.
+
+ const (
+ white = 0 // new
+ grey = 1 // in progress
+ black = 2 // complete
+ )
+
+ // visit traverses the import graph, depth-first,
+	// and materializes the graph as Package.Imports.
+	//
+	// Valid imports are saved in the Package.Imports map.
+ // Invalid imports (cycles and missing nodes) are saved in the importErrors map.
+ // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG.
+ //
+ // visit returns whether the package needs src or has a transitive
+ // dependency on a package that does. These are the only packages
+ // for which we load source code.
+ var stack []*loaderPackage
+ var visit func(lpkg *loaderPackage) bool
+ visit = func(lpkg *loaderPackage) bool {
+ switch lpkg.color {
+ case black:
+ return lpkg.needsrc
+ case grey:
+ panic("internal error: grey node")
+ }
+ lpkg.color = grey
+ stack = append(stack, lpkg) // push
+ stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
+ lpkg.Imports = make(map[string]*Package, len(stubs))
+ for importPath, ipkg := range stubs {
+ var importErr error
+ imp := ld.pkgs[ipkg.ID]
+ if imp == nil {
+ // (includes package "C" when DisableCgo)
+ importErr = fmt.Errorf("missing package: %q", ipkg.ID)
+ } else if imp.color == grey {
+ importErr = fmt.Errorf("import cycle: %s", stack)
+ }
+ if importErr != nil {
+ if lpkg.importErrors == nil {
+ lpkg.importErrors = make(map[string]error)
+ }
+ lpkg.importErrors[importPath] = importErr
+ continue
+ }
+
+ if visit(imp) {
+ lpkg.needsrc = true
+ }
+ lpkg.Imports[importPath] = imp.Package
+ }
+
+ stack = stack[:len(stack)-1] // pop
+ lpkg.color = black
+
+ return lpkg.needsrc
+ }
+
+ if ld.Mode < LoadImports {
+		// We do this to drop the stub import packages that we are not even going to try to resolve.
+ for _, lpkg := range initial {
+ lpkg.Imports = nil
+ }
+ } else {
+ // For each initial package, create its import DAG.
+ for _, lpkg := range initial {
+ visit(lpkg)
+ }
+ }
+ // Load type data if needed, starting at
+ // the initial packages (roots of the import DAG).
+ if ld.Mode >= LoadTypes {
+ var wg sync.WaitGroup
+ for _, lpkg := range initial {
+ wg.Add(1)
+ go func(lpkg *loaderPackage) {
+ ld.loadRecursive(lpkg)
+ wg.Done()
+ }(lpkg)
+ }
+ wg.Wait()
+ }
+
+ result := make([]*Package, len(initial))
+ for i, lpkg := range initial {
+ result[i] = lpkg.Package
+ }
+ return result, nil
+}
+
+// loadRecursive loads the specified package and its dependencies,
+// recursively, in parallel, in topological order.
+// It is atomic and idempotent.
+// Precondition: ld.Mode >= LoadTypes.
+func (ld *loader) loadRecursive(lpkg *loaderPackage) {
+ lpkg.loadOnce.Do(func() {
+ // Load the direct dependencies, in parallel.
+ var wg sync.WaitGroup
+ for _, ipkg := range lpkg.Imports {
+ imp := ld.pkgs[ipkg.ID]
+ wg.Add(1)
+ go func(imp *loaderPackage) {
+ ld.loadRecursive(imp)
+ wg.Done()
+ }(imp)
+ }
+ wg.Wait()
+
+ ld.loadPackage(lpkg)
+ })
+}
+
+// loadPackage loads the specified package.
+// It must be called only once per Package,
+// after immediate dependencies are loaded.
+// Precondition: ld.Mode >= LoadTypes.
+func (ld *loader) loadPackage(lpkg *loaderPackage) {
+ if lpkg.PkgPath == "unsafe" {
+ // Fill in the blanks to avoid surprises.
+ lpkg.Types = types.Unsafe
+ lpkg.Fset = ld.Fset
+ lpkg.Syntax = []*ast.File{}
+ lpkg.TypesInfo = new(types.Info)
+ return
+ }
+
+ // Call NewPackage directly with explicit name.
+ // This avoids skew between golist and go/types when the files'
+ // package declarations are inconsistent.
+ lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name)
+
+ if !lpkg.needsrc {
+ ld.loadFromExportData(lpkg)
+ return // not a source package, don't get syntax trees
+ }
+
+ hardErrors := false
+ appendError := func(err error) {
+ if terr, ok := err.(types.Error); ok && terr.Soft {
+ // Don't mark the package as bad.
+ } else {
+ hardErrors = true
+ }
+ ld.Error(err)
+ lpkg.Errors = append(lpkg.Errors, err)
+ }
+
+ files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
+ for _, err := range errs {
+ appendError(err)
+ }
+
+ lpkg.Fset = ld.Fset
+ lpkg.Syntax = files
+
+ lpkg.TypesInfo = &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Scopes: make(map[ast.Node]*types.Scope),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ }
+
+ // Copy the prototype types.Config as it must vary across Packages.
+ tc := ld.TypeChecker // copy
+ tc.Importer = importerFunc(func(path string) (*types.Package, error) {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+
+ // The imports map is keyed by import path.
+ ipkg := lpkg.Imports[path]
+ if ipkg == nil {
+ if err := lpkg.importErrors[path]; err != nil {
+ return nil, err
+ }
+ // There was skew between the metadata and the
+ // import declarations, likely due to an edit
+ // race, or because the ParseFile feature was
+ // used to supply alternative file contents.
+ return nil, fmt.Errorf("no metadata for %s", path)
+ }
+
+ if ipkg.Types != nil && ipkg.Types.Complete() {
+ return ipkg.Types, nil
+ }
+ log.Fatalf("internal error: nil Pkg importing %q from %q", path, lpkg)
+ panic("unreachable")
+ })
+ tc.Error = appendError
+
+ // type-check
+ types.NewChecker(&tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
+
+ lpkg.importErrors = nil // no longer needed
+
+ // If !Cgo, the type-checker uses FakeImportC mode, so
+ // it doesn't invoke the importer for import "C",
+ // nor report an error for the import,
+ // or for any undefined C.f reference.
+ // We must detect this explicitly and correctly
+ // mark the package as IllTyped (by reporting an error).
+ // TODO(adonovan): if these errors are annoying,
+ // we could just set IllTyped quietly.
+ if tc.FakeImportC {
+ outer:
+ for _, f := range lpkg.Syntax {
+ for _, imp := range f.Imports {
+ if imp.Path.Value == `"C"` {
+ appendError(fmt.Errorf(`%s: import "C" ignored`,
+ lpkg.Fset.Position(imp.Pos())))
+ break outer
+ }
+ }
+ }
+ }
+
+ // Record accumulated errors.
+ for _, imp := range lpkg.Imports {
+ if imp.IllTyped {
+ hardErrors = true
+ break
+ }
+ }
+
+ lpkg.IllTyped = hardErrors
+}
+
+// An importFunc is an implementation of the single-method
+// types.Importer interface based on a function value.
+type importerFunc func(path string) (*types.Package, error)
+
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
+
+// We use a counting semaphore to limit
+// the number of parallel I/O calls per process.
+var ioLimit = make(chan bool, 20)
+
+// parseFiles reads and parses the Go source files and returns the ASTs
+// of the ones that could be at least partially parsed, along with a
+// list of I/O and parse errors encountered.
+//
+// Because files are scanned in parallel, the token.Pos
+// positions of the resulting ast.Files are not ordered.
+//
+func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
+ var wg sync.WaitGroup
+ n := len(filenames)
+ parsed := make([]*ast.File, n)
+ errors := make([]error, n)
+ for i, file := range filenames {
+ wg.Add(1)
+ go func(i int, filename string) {
+ ioLimit <- true // wait
+ // ParseFile may return both an AST and an error.
+ parsed[i], errors[i] = ld.ParseFile(ld.Fset, filename)
+ <-ioLimit // signal
+ wg.Done()
+ }(i, file)
+ }
+ wg.Wait()
+
+ // Eliminate nils, preserving order.
+ var o int
+ for _, f := range parsed {
+ if f != nil {
+ parsed[o] = f
+ o++
+ }
+ }
+ parsed = parsed[:o]
+
+ o = 0
+ for _, err := range errors {
+ if err != nil {
+ errors[o] = err
+ o++
+ }
+ }
+ errors = errors[:o]
+
+ return parsed, errors
+}
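
The counting-semaphore idiom used by ioLimit above, shown in isolation as a generic sketch (the work list and the per-item work are hypothetical):

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		filenames := []string{"a.go", "b.go", "c.go"} // hypothetical work list
		sem := make(chan bool, 20)                    // at most 20 concurrent workers
		var wg sync.WaitGroup
		for _, name := range filenames {
			wg.Add(1)
			go func(name string) {
				defer wg.Done()
				sem <- true              // acquire a slot
				defer func() { <-sem }() // release it
				fmt.Println("processing", name) // stand-in for per-file I/O
			}(name)
		}
		wg.Wait()
	}
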
+
+// loadFromExportData returns type information for the specified
+// package, loading it from an export data file on the first request.
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) {
+ if lpkg.PkgPath == "" {
+ log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
+ }
+
+ // Because gcexportdata.Read has the potential to create or
+ // modify the types.Package for each node in the transitive
+ // closure of dependencies of lpkg, all exportdata operations
+ // must be sequential. (Finer-grained locking would require
+ // changes to the gcexportdata API.)
+ //
+	// The exportMu lock guards the Package.Types field and the
+	// types.Package it points to, for each Package in the graph.
+	//
+	// Not all accesses to Package.Types need to be protected by exportMu:
+	// graph ordering ensures that direct dependencies of source
+	// packages are fully loaded before the importer reads their Types field.
+ ld.exportMu.Lock()
+ defer ld.exportMu.Unlock()
+
+ if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
+ return tpkg, nil // cache hit
+ }
+
+ lpkg.IllTyped = true // fail safe
+
+ if lpkg.ExportFile == "" {
+ // Errors while building export data will have been printed to stderr.
+ return nil, fmt.Errorf("no export data file")
+ }
+ f, err := os.Open(lpkg.ExportFile)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ // Read gc export data.
+ //
+ // We don't currently support gccgo export data because all
+ // underlying workspaces use the gc toolchain. (Even build
+ // systems that support gccgo don't use it for workspace
+ // queries.)
+ r, err := gcexportdata.NewReader(f)
+ if err != nil {
+ return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ }
+
+ // Build the view.
+ //
+ // The gcexportdata machinery has no concept of package ID.
+ // It identifies packages by their PkgPath, which although not
+ // globally unique is unique within the scope of one invocation
+ // of the linker, type-checker, or gcexportdata.
+ //
+ // So, we must build a PkgPath-keyed view of the global
+ // (conceptually ID-keyed) cache of packages and pass it to
+ // gcexportdata. The view must contain every existing
+ // package that might possibly be mentioned by the
+ // current package---its transitive closure.
+ //
+	// TODO(adonovan): it would be simpler and more efficient
+ // if the export data machinery invoked a callback to
+ // get-or-create a package instead of a map.
+ //
+ view := make(map[string]*types.Package) // view seen by gcexportdata
+ seen := make(map[*loaderPackage]bool) // all visited packages
+ var visit func(pkgs map[string]*Package)
+ visit = func(pkgs map[string]*Package) {
+ for _, p := range pkgs {
+ lpkg := ld.pkgs[p.ID]
+ if !seen[lpkg] {
+ seen[lpkg] = true
+ view[lpkg.PkgPath] = lpkg.Types
+ visit(lpkg.Imports)
+ }
+ }
+ }
+ visit(lpkg.Imports)
+
+ // Parse the export data.
+ // (May create/modify packages in view.)
+ tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
+ if err != nil {
+ return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ }
+
+ lpkg.Types = tpkg
+ lpkg.IllTyped = false
+
+ return tpkg, nil
+}
+
+func usesExportData(cfg *Config) bool {
+ return LoadTypes <= cfg.Mode && cfg.Mode < LoadAllSyntax
+}