Rename scripts to build and add revive command as a new build tool command (#10942)

Co-authored-by: techknowlogick <techknowlogick@gitea.io>
Lunny Xiao 2020-04-04 03:29:12 +08:00 committed by GitHub
parent 4af7c47b38
commit 4f63f283c4
182 changed files with 15832 additions and 1226 deletions


@ -14,14 +14,14 @@ import (
"sync"
)
// TraverseLink is used as a return value from WalkFuncs to indicate that the
// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the
// symlink named in the call may be traversed.
var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
// SkipFiles is a used as a return value from WalkFuncs to indicate that the
// ErrSkipFiles is a used as a return value from WalkFuncs to indicate that the
// callback should not be called for any other files in the current directory.
// Child directories will still be traversed.
var SkipFiles = errors.New("fastwalk: skip remaining files in directory")
var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory")
// Walk is a faster implementation of filepath.Walk.
//
@ -167,7 +167,7 @@ func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error {
err := w.fn(joined, typ)
if typ == os.ModeSymlink {
if err == TraverseLink {
if err == ErrTraverseLink {
// Set callbackDone so we don't call it twice for both the
// symlink-as-symlink and the symlink-as-directory later:
w.enqueue(walkItem{dir: joined, callbackDone: true})
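For context, here is a minimal caller sketch (not part of the diff) using the renamed sentinel errors. It assumes the vendored fastwalk.Walk signature shown in this file, Walk(root string, fn func(path string, typ os.FileMode) error) error, and ignores Go's internal-package import restriction for the sake of illustration.

// Hypothetical walk function showing fastwalk.ErrTraverseLink and
// fastwalk.ErrSkipFiles in use after the rename.
package main

import (
    "fmt"
    "os"
    "strings"

    "golang.org/x/tools/internal/fastwalk"
)

func main() {
    err := fastwalk.Walk(os.Getenv("GOROOT")+"/src", func(path string, typ os.FileMode) error {
        if typ == os.ModeSymlink {
            // Ask fastwalk to follow the symlink as if it were a directory.
            return fastwalk.ErrTraverseLink
        }
        if typ.IsRegular() && strings.HasSuffix(path, ".go") {
            fmt.Println(path)
            // One .go file is enough for this directory; skip its siblings.
            return fastwalk.ErrSkipFiles
        }
        return nil
    })
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}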


@ -26,7 +26,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
continue
}
if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil {
if err == SkipFiles {
if err == ErrSkipFiles {
skipFiles = true
continue
}


@ -66,7 +66,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
continue
}
if err := fn(dirName, name, typ); err != nil {
if err == SkipFiles {
if err == ErrSkipFiles {
skipFiles = true
continue
}

121 lines: vendor/golang.org/x/tools/internal/gocommand/invoke.go (generated, vendored, new file)

@ -0,0 +1,121 @@
// Package gocommand is a helper for calling the go command.
package gocommand
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
"strings"
"time"
)
// An Invocation represents a call to the go command.
type Invocation struct {
Verb string
Args []string
BuildFlags []string
Env []string
WorkingDir string
Logf func(format string, args ...interface{})
}
// Run runs the invocation, returning its stdout and an error suitable for
// human consumption, including stderr.
func (i *Invocation) Run(ctx context.Context) (*bytes.Buffer, error) {
stdout, _, friendly, _ := i.RunRaw(ctx)
return stdout, friendly
}
// RunRaw is like Run, but also returns the raw stderr and error for callers
// that want to do low-level error handling/recovery.
func (i *Invocation) RunRaw(ctx context.Context) (stdout *bytes.Buffer, stderr *bytes.Buffer, friendlyError error, rawError error) {
log := i.Logf
if log == nil {
log = func(string, ...interface{}) {}
}
goArgs := []string{i.Verb}
switch i.Verb {
case "mod":
// mod needs the sub-verb before build flags.
goArgs = append(goArgs, i.Args[0])
goArgs = append(goArgs, i.BuildFlags...)
goArgs = append(goArgs, i.Args[1:]...)
case "env":
// env doesn't take build flags.
goArgs = append(goArgs, i.Args...)
default:
goArgs = append(goArgs, i.BuildFlags...)
goArgs = append(goArgs, i.Args...)
}
cmd := exec.Command("go", goArgs...)
stdout = &bytes.Buffer{}
stderr = &bytes.Buffer{}
cmd.Stdout = stdout
cmd.Stderr = stderr
// On darwin the cwd gets resolved to the real path, which breaks anything that
// expects the working directory to keep the original path, including the
// go command when dealing with modules.
// The Go stdlib has a special feature where if the cwd and the PWD are the
// same node then it trusts the PWD, so by setting it in the env for the child
// process we fix up all the paths returned by the go command.
cmd.Env = append(append([]string{}, i.Env...), "PWD="+i.WorkingDir)
cmd.Dir = i.WorkingDir
defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
rawError = runCmdContext(ctx, cmd)
friendlyError = rawError
if rawError != nil {
// Check for 'go' executable not being found.
if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
friendlyError = fmt.Errorf("go command required, not found: %v", ee)
}
if ctx.Err() != nil {
friendlyError = ctx.Err()
}
friendlyError = fmt.Errorf("err: %v: stderr: %s", rawError, stderr)
}
return
}
// runCmdContext is like exec.CommandContext except it sends os.Interrupt
// before os.Kill.
func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
if err := cmd.Start(); err != nil {
return err
}
resChan := make(chan error, 1)
go func() {
resChan <- cmd.Wait()
}()
select {
case err := <-resChan:
return err
case <-ctx.Done():
}
// Cancelled. Interrupt and see if it ends voluntarily.
cmd.Process.Signal(os.Interrupt)
select {
case err := <-resChan:
return err
case <-time.After(time.Second):
}
// Didn't shut down in response to interrupt. Kill it hard.
cmd.Process.Kill()
return <-resChan
}
func cmdDebugStr(cmd *exec.Cmd) string {
env := make(map[string]string)
for _, kv := range cmd.Env {
split := strings.Split(kv, "=")
k, v := split[0], split[1]
env[k] = v
}
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args)
}
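A minimal usage sketch of the new Invocation type (not part of the diff), assuming only the API shown above; the working directory and error handling are illustrative.

// Hypothetical caller of the vendored gocommand helper: run `go env GOMOD`
// in the current directory and print its output.
package main

import (
    "context"
    "fmt"
    "os"

    "golang.org/x/tools/internal/gocommand"
)

func main() {
    wd, err := os.Getwd()
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    inv := gocommand.Invocation{
        Verb:       "env",
        Args:       []string{"GOMOD"},
        Env:        os.Environ(),
        WorkingDir: wd,
    }
    stdout, err := inv.Run(context.Background())
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    fmt.Print(stdout.String())
}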


@ -77,6 +77,7 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root
}
}
// walkDir creates a walker and starts fastwalk with this walker.
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
if opts.Debug {
@ -114,7 +115,7 @@ type walker struct {
ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
}
// init initializes the walker based on its Options.
// init initializes the walker based on its Options
func (w *walker) init() {
var ignoredPaths []string
if w.root.Type == RootModuleCache {
@ -167,6 +168,7 @@ func (w *walker) getIgnoredDirs(path string) []string {
return ignoredDirs
}
// shouldSkipDir reports whether the file should be skipped or not.
func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
for _, ignoredDir := range w.ignoredDirs {
if os.SameFile(fi, ignoredDir) {
@ -180,20 +182,21 @@ func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
return false
}
// walk walks through the given path.
func (w *walker) walk(path string, typ os.FileMode) error {
dir := filepath.Dir(path)
if typ.IsRegular() {
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
// Doesn't make sense to have regular files
// directly in your $GOPATH/src or $GOROOT/src.
return fastwalk.SkipFiles
return fastwalk.ErrSkipFiles
}
if !strings.HasSuffix(path, ".go") {
return nil
}
w.add(w.root, dir)
return fastwalk.SkipFiles
return fastwalk.ErrSkipFiles
}
if typ == os.ModeDir {
base := filepath.Base(path)
@ -221,7 +224,7 @@ func (w *walker) walk(path string, typ os.FileMode) error {
return nil
}
if w.shouldTraverse(dir, fi) {
return fastwalk.TraverseLink
return fastwalk.ErrTraverseLink
}
}
return nil


@ -14,7 +14,6 @@ import (
"go/token"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
@ -22,12 +21,11 @@ import (
"strconv"
"strings"
"sync"
"time"
"unicode"
"unicode/utf8"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/gopathwalk"
)
@ -82,7 +80,8 @@ type ImportFix struct {
// IdentName is the identifier that this fix will add or remove.
IdentName string
// FixType is the type of fix this is (AddImport, DeleteImport, SetImportName).
FixType ImportFixType
FixType ImportFixType
Relevance int // see pkg
}
// An ImportInfo represents a single import statement.
@ -537,7 +536,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
// derive package names from import paths, see if the file is already
// complete. We can't add any imports yet, because we don't know
// if missing references are actually package vars.
p := &pass{fset: fset, f: f, srcDir: srcDir}
p := &pass{fset: fset, f: f, srcDir: srcDir, env: env}
if fixes, done := p.load(); done {
return fixes, nil
}
@ -559,8 +558,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
}
// Third pass: get real package names where we had previously used
// the naive algorithm. This is the first step that will use the
// environment, so we provide it here for the first time.
// the naive algorithm.
p = &pass{fset: fset, f: f, srcDir: srcDir, env: env}
p.loadRealPackageNames = true
p.otherFiles = otherFiles
@ -585,62 +583,86 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
return fixes, nil
}
// getCandidatePkgs returns the list of pkgs that are accessible from filename,
// optionall filtered to only packages named pkgName.
func getCandidatePkgs(pkgName, filename string, env *ProcessEnv) ([]*pkg, error) {
// TODO(heschi): filter out current package. (Don't forget x_test can import x.)
// Highest relevance, used for the standard library. Chosen arbitrarily to
// match pre-existing gopls code.
const MaxRelevance = 7
var result []*pkg
// getCandidatePkgs works with the passed callback to find all acceptable packages.
// It deduplicates by import path, and uses a cached stdlib rather than reading
// from disk.
func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filename, filePkg string, env *ProcessEnv) error {
notSelf := func(p *pkg) bool {
return p.packageName != filePkg || p.dir != filepath.Dir(filename)
}
// Start off with the standard library.
for importPath := range stdlib {
if pkgName != "" && path.Base(importPath) != pkgName {
continue
}
result = append(result, &pkg{
for importPath, exports := range stdlib {
p := &pkg{
dir: filepath.Join(env.GOROOT, "src", importPath),
importPathShort: importPath,
packageName: path.Base(importPath),
relevance: 0,
})
}
// Exclude goroot results -- getting them is relatively expensive, not cached,
// and generally redundant with the in-memory version.
exclude := []gopathwalk.RootType{gopathwalk.RootGOROOT}
// Only the go/packages resolver uses the first argument, and nobody uses that resolver.
scannedPkgs, err := env.GetResolver().scan(nil, true, exclude)
if err != nil {
return nil, err
relevance: MaxRelevance,
}
if notSelf(p) && wrappedCallback.packageNameLoaded(p) {
wrappedCallback.exportsLoaded(p, exports)
}
}
var mu sync.Mutex
dupCheck := map[string]struct{}{}
for _, pkg := range scannedPkgs {
if pkgName != "" && pkg.packageName != pkgName {
continue
}
if !canUse(filename, pkg.dir) {
continue
}
if _, ok := dupCheck[pkg.importPathShort]; ok {
continue
}
dupCheck[pkg.importPathShort] = struct{}{}
result = append(result, pkg)
scanFilter := &scanCallback{
rootFound: func(root gopathwalk.Root) bool {
// Exclude goroot results -- getting them is relatively expensive, not cached,
// and generally redundant with the in-memory version.
return root.Type != gopathwalk.RootGOROOT && wrappedCallback.rootFound(root)
},
dirFound: wrappedCallback.dirFound,
packageNameLoaded: func(pkg *pkg) bool {
mu.Lock()
defer mu.Unlock()
if _, ok := dupCheck[pkg.importPathShort]; ok {
return false
}
dupCheck[pkg.importPathShort] = struct{}{}
return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg)
},
exportsLoaded: func(pkg *pkg, exports []string) {
// If we're an x_test, load the package under test's test variant.
if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) {
var err error
_, exports, err = loadExportsFromFiles(ctx, env, pkg.dir, true)
if err != nil {
return
}
}
wrappedCallback.exportsLoaded(pkg, exports)
},
}
return env.GetResolver().scan(ctx, scanFilter)
}
// Sort first by relevance, then by package name, with import path as a tiebreaker.
sort.Slice(result, func(i, j int) bool {
pi, pj := result[i], result[j]
if pi.relevance != pj.relevance {
return pi.relevance < pj.relevance
}
if pi.packageName != pj.packageName {
return pi.packageName < pj.packageName
}
return pi.importPathShort < pj.importPathShort
})
func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) map[string]int {
result := make(map[string]int)
for _, path := range paths {
result[path] = env.GetResolver().scoreImportPath(ctx, path)
}
return result
}
return result, nil
func PrimeCache(ctx context.Context, env *ProcessEnv) error {
// Fully scan the disk for directories, but don't actually read any Go files.
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true
},
dirFound: func(pkg *pkg) bool {
return false
},
packageNameLoaded: func(pkg *pkg) bool {
return false
},
}
return getCandidatePkgs(ctx, callback, "", "", env)
}
func candidateImportName(pkg *pkg) string {
@ -651,23 +673,37 @@ func candidateImportName(pkg *pkg) string {
}
// getAllCandidates gets all of the candidates to be imported, regardless of if they are needed.
func getAllCandidates(filename string, env *ProcessEnv) ([]ImportFix, error) {
pkgs, err := getCandidatePkgs("", filename, env)
if err != nil {
return nil, err
func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error {
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true
},
dirFound: func(pkg *pkg) bool {
if !canUse(filename, pkg.dir) {
return false
}
// Try the assumed package name first, then a simpler path match
// in case of packages named vN, which are not uncommon.
return strings.HasPrefix(ImportPathToAssumedName(pkg.importPathShort), searchPrefix) ||
strings.HasPrefix(path.Base(pkg.importPathShort), searchPrefix)
},
packageNameLoaded: func(pkg *pkg) bool {
if !strings.HasPrefix(pkg.packageName, searchPrefix) {
return false
}
wrapped(ImportFix{
StmtInfo: ImportInfo{
ImportPath: pkg.importPathShort,
Name: candidateImportName(pkg),
},
IdentName: pkg.packageName,
FixType: AddImport,
Relevance: pkg.relevance,
})
return false
},
}
result := make([]ImportFix, 0, len(pkgs))
for _, pkg := range pkgs {
result = append(result, ImportFix{
StmtInfo: ImportInfo{
ImportPath: pkg.importPathShort,
Name: candidateImportName(pkg),
},
IdentName: pkg.packageName,
FixType: AddImport,
})
}
return result, nil
return getCandidatePkgs(ctx, callback, filename, filePkg, env)
}
// A PackageExport is a package and its exports.
@ -676,42 +712,34 @@ type PackageExport struct {
Exports []string
}
func getPackageExports(completePackage, filename string, env *ProcessEnv) ([]PackageExport, error) {
pkgs, err := getCandidatePkgs(completePackage, filename, env)
if err != nil {
return nil, err
func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error {
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true
},
dirFound: func(pkg *pkg) bool {
return pkgIsCandidate(filename, references{searchPkg: nil}, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
return pkg.packageName == searchPkg
},
exportsLoaded: func(pkg *pkg, exports []string) {
sort.Strings(exports)
wrapped(PackageExport{
Fix: &ImportFix{
StmtInfo: ImportInfo{
ImportPath: pkg.importPathShort,
Name: candidateImportName(pkg),
},
IdentName: pkg.packageName,
FixType: AddImport,
Relevance: pkg.relevance,
},
Exports: exports,
})
},
}
results := make([]PackageExport, 0, len(pkgs))
for _, pkg := range pkgs {
fix := &ImportFix{
StmtInfo: ImportInfo{
ImportPath: pkg.importPathShort,
Name: candidateImportName(pkg),
},
IdentName: pkg.packageName,
FixType: AddImport,
}
var exports []string
if e, ok := stdlib[pkg.importPathShort]; ok {
exports = e
} else {
exports, err = loadExportsForPackage(context.Background(), env, completePackage, pkg)
if err != nil {
if env.Debug {
env.Logf("while completing %q, error loading exports from %q: %v", completePackage, pkg.importPathShort, err)
}
continue
}
}
sort.Strings(exports)
results = append(results, PackageExport{
Fix: fix,
Exports: exports,
})
}
return results, nil
return getCandidatePkgs(ctx, callback, filename, filePkg, env)
}
// ProcessEnv contains environment variables and settings that affect the use of
@ -720,20 +748,26 @@ type ProcessEnv struct {
LocalPrefix string
Debug bool
BuildFlags []string
// If non-empty, these will be used instead of the
// process-wide values.
GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string
WorkingDir string
// If true, use go/packages regardless of the environment.
ForceGoPackages bool
// Logf is the default logger for the ProcessEnv.
Logf func(format string, args ...interface{})
resolver Resolver
}
// CopyConfig copies the env's configuration into a new env.
func (e *ProcessEnv) CopyConfig() *ProcessEnv {
copy := *e
copy.resolver = nil
return &copy
}
func (e *ProcessEnv) env() []string {
env := os.Environ()
add := func(k, v string) {
@ -757,73 +791,55 @@ func (e *ProcessEnv) GetResolver() Resolver {
if e.resolver != nil {
return e.resolver
}
if e.ForceGoPackages {
e.resolver = &goPackagesResolver{env: e}
return e.resolver
}
out, err := e.invokeGo("env", "GOMOD")
out, err := e.invokeGo(context.TODO(), "env", "GOMOD")
if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 {
e.resolver = &gopathResolver{env: e}
e.resolver = newGopathResolver(e)
return e.resolver
}
e.resolver = &ModuleResolver{env: e}
e.resolver = newModuleResolver(e)
return e.resolver
}
func (e *ProcessEnv) newPackagesConfig(mode packages.LoadMode) *packages.Config {
return &packages.Config{
Mode: mode,
Dir: e.WorkingDir,
Env: e.env(),
}
}
func (e *ProcessEnv) buildContext() *build.Context {
ctx := build.Default
ctx.GOROOT = e.GOROOT
ctx.GOPATH = e.GOPATH
// As of Go 1.14, build.Context has a WorkingDir field
// As of Go 1.14, build.Context has a Dir field
// (see golang.org/issue/34860).
// Populate it only if present.
if wd := reflect.ValueOf(&ctx).Elem().FieldByName("WorkingDir"); wd.IsValid() && wd.Kind() == reflect.String {
wd.SetString(e.WorkingDir)
rc := reflect.ValueOf(&ctx).Elem()
dir := rc.FieldByName("Dir")
if !dir.IsValid() {
// Working drafts of Go 1.14 named the field "WorkingDir" instead.
// TODO(bcmills): Remove this case after the Go 1.14 beta has been released.
dir = rc.FieldByName("WorkingDir")
}
if dir.IsValid() && dir.Kind() == reflect.String {
dir.SetString(e.WorkingDir)
}
return &ctx
}
func (e *ProcessEnv) invokeGo(args ...string) (*bytes.Buffer, error) {
cmd := exec.Command("go", args...)
stdout := &bytes.Buffer{}
stderr := &bytes.Buffer{}
cmd.Stdout = stdout
cmd.Stderr = stderr
cmd.Env = e.env()
cmd.Dir = e.WorkingDir
if e.Debug {
defer func(start time.Time) { e.Logf("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) (*bytes.Buffer, error) {
inv := gocommand.Invocation{
Verb: verb,
Args: args,
BuildFlags: e.BuildFlags,
Env: e.env(),
Logf: e.Logf,
WorkingDir: e.WorkingDir,
}
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("running go: %v (stderr:\n%s)", err, stderr)
}
return stdout, nil
}
func cmdDebugStr(cmd *exec.Cmd) string {
env := make(map[string]string)
for _, kv := range cmd.Env {
split := strings.Split(kv, "=")
k, v := split[0], split[1]
env[k] = v
}
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args)
return inv.Run(ctx)
}
func addStdlibCandidates(pass *pass, refs references) {
add := func(pkg string) {
// Prevent self-imports.
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.GOROOT, "src", pkg) == pass.srcDir {
return
}
exports := copyExports(stdlib[pkg])
pass.addCandidate(
&ImportInfo{ImportPath: pkg},
@ -848,94 +864,65 @@ func addStdlibCandidates(pass *pass, refs references) {
type Resolver interface {
// loadPackageNames loads the package names in importPaths.
loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
// scan finds (at least) the packages satisfying refs. If loadNames is true,
// package names will be set on the results, and dirs whose package name
// could not be determined will be excluded.
scan(refs references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error)
// scan works with callback to search for packages. See scanCallback for details.
scan(ctx context.Context, callback *scanCallback) error
// loadExports returns the set of exported symbols in the package at dir.
// loadExports may be called concurrently.
loadExports(ctx context.Context, pkg *pkg) (string, []string, error)
loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error)
// scoreImportPath returns the relevance for an import path.
scoreImportPath(ctx context.Context, path string) int
ClearForNewScan()
}
// gopackagesResolver implements resolver for GOPATH and module workspaces using go/packages.
type goPackagesResolver struct {
env *ProcessEnv
}
func (r *goPackagesResolver) ClearForNewScan() {}
func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
if len(importPaths) == 0 {
return nil, nil
}
cfg := r.env.newPackagesConfig(packages.LoadFiles)
pkgs, err := packages.Load(cfg, importPaths...)
if err != nil {
return nil, err
}
names := map[string]string{}
for _, pkg := range pkgs {
names[VendorlessPath(pkg.PkgPath)] = pkg.Name
}
// We may not have found all the packages. Guess the rest.
for _, path := range importPaths {
if _, ok := names[path]; ok {
continue
}
names[path] = ImportPathToAssumedName(path)
}
return names, nil
}
func (r *goPackagesResolver) scan(refs references, _ bool, _ []gopathwalk.RootType) ([]*pkg, error) {
var loadQueries []string
for pkgName := range refs {
loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName)
}
sort.Strings(loadQueries)
cfg := r.env.newPackagesConfig(packages.LoadFiles)
goPackages, err := packages.Load(cfg, loadQueries...)
if err != nil {
return nil, err
}
var scan []*pkg
for _, goPackage := range goPackages {
scan = append(scan, &pkg{
dir: filepath.Dir(goPackage.CompiledGoFiles[0]),
importPathShort: VendorlessPath(goPackage.PkgPath),
goPackage: goPackage,
packageName: goPackage.Name,
})
}
return scan, nil
}
func (r *goPackagesResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) {
if pkg.goPackage == nil {
return "", nil, fmt.Errorf("goPackage not set")
}
var exports []string
fset := token.NewFileSet()
for _, fname := range pkg.goPackage.CompiledGoFiles {
f, err := parser.ParseFile(fset, fname, nil, 0)
if err != nil {
return "", nil, fmt.Errorf("parsing %s: %v", fname, err)
}
for name := range f.Scope.Objects {
if ast.IsExported(name) {
exports = append(exports, name)
}
}
}
return pkg.goPackage.Name, exports, nil
// A scanCallback controls a call to scan and receives its results.
// In general, minor errors will be silently discarded; a user should not
// expect to receive a full series of calls for everything.
type scanCallback struct {
// rootFound is called before scanning a new root dir. If it returns true,
// the root will be scanned. Returning false will not necessarily prevent
// directories from that root making it to dirFound.
rootFound func(gopathwalk.Root) bool
// dirFound is called when a directory is found that is possibly a Go package.
// pkg will be populated with everything except packageName.
// If it returns true, the package's name will be loaded.
dirFound func(pkg *pkg) bool
// packageNameLoaded is called when a package is found and its name is loaded.
// If it returns true, the package's exports will be loaded.
packageNameLoaded func(pkg *pkg) bool
// exportsLoaded is called when a package's exports have been loaded.
exportsLoaded func(pkg *pkg, exports []string)
}
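A minimal in-package sketch (not part of the diff) of how a scanCallback might be wired up; collectPackageNames is a hypothetical helper that relies on the package's existing imports.

// collectPackageNames shows the callback protocol: each hook returns true to
// request the next, more expensive, stage; hooks may run concurrently, hence
// the mutex.
func collectPackageNames(ctx context.Context, env *ProcessEnv) (map[string]string, error) {
    names := map[string]string{} // import path -> package name
    var mu sync.Mutex
    callback := &scanCallback{
        rootFound: func(gopathwalk.Root) bool { return true }, // scan every root
        dirFound:  func(p *pkg) bool { return true },          // a real caller filters here, e.g. with pkgIsCandidate
        packageNameLoaded: func(p *pkg) bool {
            mu.Lock()
            defer mu.Unlock()
            names[p.importPathShort] = p.packageName
            return false // no exports needed, so exportsLoaded is never called
        },
    }
    if err := env.GetResolver().scan(ctx, callback); err != nil {
        return nil, err
    }
    return names, nil
}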
func addExternalCandidates(pass *pass, refs references, filename string) error {
dirScan, err := pass.env.GetResolver().scan(refs, false, nil)
var mu sync.Mutex
found := make(map[string][]pkgDistance)
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true // We want everything.
},
dirFound: func(pkg *pkg) bool {
return pkgIsCandidate(filename, refs, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
if _, want := refs[pkg.packageName]; !want {
return false
}
if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName {
// The candidate is in the same directory and has the
// same package name. Don't try to import ourselves.
return false
}
if !canUse(filename, pkg.dir) {
return false
}
mu.Lock()
defer mu.Unlock()
found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)})
return false // We'll do our own loading after we sort.
},
}
err := pass.env.GetResolver().scan(context.Background(), callback)
if err != nil {
return err
}
@ -962,7 +949,7 @@ func addExternalCandidates(pass *pass, refs references, filename string) error {
go func(pkgName string, symbols map[string]bool) {
defer wg.Done()
found, err := findImport(ctx, pass, dirScan, pkgName, symbols, filename)
found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename)
if err != nil {
firstErrOnce.Do(func() {
@ -1033,24 +1020,36 @@ func ImportPathToAssumedName(importPath string) string {
// gopathResolver implements resolver for GOPATH workspaces.
type gopathResolver struct {
env *ProcessEnv
cache *dirInfoCache
env *ProcessEnv
walked bool
cache *dirInfoCache
scanSema chan struct{} // scanSema prevents concurrent scans.
}
func (r *gopathResolver) init() {
if r.cache == nil {
r.cache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
}
func newGopathResolver(env *ProcessEnv) *gopathResolver {
r := &gopathResolver{
env: env,
cache: &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
},
scanSema: make(chan struct{}, 1),
}
r.scanSema <- struct{}{}
return r
}
func (r *gopathResolver) ClearForNewScan() {
r.cache = nil
<-r.scanSema
r.cache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
}
r.walked = false
r.scanSema <- struct{}{}
}
func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
r.init()
names := map[string]string{}
for _, path := range importPaths {
names[path] = importPathToName(r.env, path, srcDir)
@ -1130,7 +1129,6 @@ func packageDirToName(dir string) (packageName string, err error) {
}
type pkg struct {
goPackage *packages.Package
dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http")
importPathShort string // vendorless import path ("net/http", "a/b")
packageName string // package name loaded from source if requested
@ -1178,8 +1176,7 @@ func distance(basepath, targetpath string) int {
return strings.Count(p, string(filepath.Separator)) + 1
}
func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) {
r.init()
func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error {
add := func(root gopathwalk.Root, dir string) {
// We assume cached directories have not changed. We can skip them and their
// children.
@ -1196,56 +1193,84 @@ func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk
}
r.cache.Store(dir, info)
}
roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), exclude)
gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false})
var result []*pkg
for _, dir := range r.cache.Keys() {
info, ok := r.cache.Load(dir)
if !ok {
continue
}
if loadNames {
var err error
info, err = r.cache.CachePackageName(info)
if err != nil {
continue
}
processDir := func(info directoryPackageInfo) {
// Skip this directory if we were not able to get the package information successfully.
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
return
}
p := &pkg{
importPathShort: info.nonCanonicalImportPath,
dir: dir,
relevance: 1,
packageName: info.packageName,
dir: info.dir,
relevance: MaxRelevance - 1,
}
if info.rootType == gopathwalk.RootGOROOT {
p.relevance = 0
p.relevance = MaxRelevance
}
if !callback.dirFound(p) {
return
}
var err error
p.packageName, err = r.cache.CachePackageName(info)
if err != nil {
return
}
if !callback.packageNameLoaded(p) {
return
}
if _, exports, err := r.loadExports(ctx, p, false); err == nil {
callback.exportsLoaded(p, exports)
}
result = append(result, p)
}
return result, nil
stop := r.cache.ScanAndListen(ctx, processDir)
defer stop()
// The callback is not necessarily safe to use in the goroutine below. Process roots eagerly.
roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), callback.rootFound)
// We can't cancel walks, because we need them to finish to have a usable
// cache. Instead, run them in a separate goroutine and detach.
scanDone := make(chan struct{})
go func() {
select {
case <-ctx.Done():
return
case <-r.scanSema:
}
defer func() { r.scanSema <- struct{}{} }()
gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false})
close(scanDone)
}()
select {
case <-ctx.Done():
case <-scanDone:
}
return nil
}
func filterRoots(roots []gopathwalk.Root, exclude []gopathwalk.RootType) []gopathwalk.Root {
func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) int {
if _, ok := stdlib[path]; ok {
return MaxRelevance
}
return MaxRelevance - 1
}
func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []gopathwalk.Root {
var result []gopathwalk.Root
outer:
for _, root := range roots {
for _, i := range exclude {
if i == root.Type {
continue outer
}
if !include(root) {
continue
}
result = append(result, root)
}
return result
}
func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) {
r.init()
if info, ok := r.cache.Load(pkg.dir); ok {
func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
if info, ok := r.cache.Load(pkg.dir); ok && !includeTest {
return r.cache.CacheExports(ctx, r.env, info)
}
return loadExportsFromFiles(ctx, r.env, pkg.dir)
return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest)
}
// VendorlessPath returns the devendorized version of the import path ipath.
@ -1261,7 +1286,7 @@ func VendorlessPath(ipath string) string {
return ipath
}
func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (string, []string, error) {
func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) {
var exports []string
// Look for non-test, buildable .go files which could provide exports.
@ -1272,7 +1297,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str
var files []os.FileInfo
for _, fi := range all {
name := fi.Name()
if !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") {
if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) {
continue
}
match, err := env.buildContext().MatchFile(dir, fi.Name())
@ -1305,6 +1330,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str
// handled by MatchFile above.
continue
}
if includeTest && strings.HasSuffix(f.Name.Name, "_test") {
// x_test package. We want internal test files only.
continue
}
pkgName = f.Name.Name
for name := range f.Scope.Objects {
if ast.IsExported(name) {
@ -1323,29 +1352,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str
// findImport searches for a package with the given symbols.
// If no package is found, findImport returns ("", false, nil)
func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, symbols map[string]bool, filename string) (*pkg, error) {
pkgDir, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
pkgDir = filepath.Dir(pkgDir)
// Find candidate packages, looking only at their directory names first.
var candidates []pkgDistance
for _, pkg := range dirScan {
if pkg.dir == pkgDir && pass.f.Name.Name == pkgName {
// The candidate is in the same directory and has the
// same package name. Don't try to import ourselves.
continue
}
if pkgIsCandidate(filename, pkgName, pkg) {
candidates = append(candidates, pkgDistance{
pkg: pkg,
distance: distance(pkgDir, pkg.dir),
})
}
}
func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) {
// Sort the candidates by their import package length,
// assuming that shorter package names are better than long
// ones. Note that this sorts by the de-vendored name, so
@ -1358,7 +1365,6 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string,
}
// Collect exports for packages with matching names.
rescv := make([]chan *pkg, len(candidates))
for i := range candidates {
rescv[i] = make(chan *pkg, 1)
@ -1393,7 +1399,9 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string,
if pass.env.Debug {
pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
}
exports, err := loadExportsForPackage(ctx, pass.env, pkgName, c.pkg)
// If we're an x_test, load the package under test's test variant.
includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir
_, exports, err := pass.env.GetResolver().loadExports(ctx, c.pkg, includeTest)
if err != nil {
if pass.env.Debug {
pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
@ -1430,17 +1438,6 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string,
return nil, nil
}
func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg string, pkg *pkg) ([]string, error) {
pkgName, exports, err := env.GetResolver().loadExports(ctx, pkg)
if err != nil {
return nil, err
}
if expectPkg != pkgName {
return nil, fmt.Errorf("dir %v is package %v, wanted %v", pkg.dir, pkgName, expectPkg)
}
return exports, err
}
// pkgIsCandidate reports whether pkg is a candidate for satisfying the
// finding which package pkgIdent in the file named by filename is trying
// to refer to.
@ -1453,7 +1450,7 @@ func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg strin
// filename is the file being formatted.
// pkgIdent is the package being searched for, like "client" (if
// searching for "client.New")
func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool {
func pkgIsCandidate(filename string, refs references, pkg *pkg) bool {
// Check "internal" and "vendor" visibility:
if !canUse(filename, pkg.dir) {
return false
@ -1471,17 +1468,18 @@ func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool {
// "bar", which is strongly discouraged
// anyway. There's no reason goimports needs
// to be slow just to accommodate that.
lastTwo := lastTwoComponents(pkg.importPathShort)
if strings.Contains(lastTwo, pkgIdent) {
return true
}
if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) {
lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
for pkgIdent := range refs {
lastTwo := lastTwoComponents(pkg.importPathShort)
if strings.Contains(lastTwo, pkgIdent) {
return true
}
if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) {
lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
if strings.Contains(lastTwo, pkgIdent) {
return true
}
}
}
return false
}


@ -11,6 +11,7 @@ package imports
import (
"bufio"
"bytes"
"context"
"fmt"
"go/ast"
"go/build"
@ -21,6 +22,7 @@ import (
"io"
"io/ioutil"
"log"
"os"
"regexp"
"strconv"
"strings"
@ -114,23 +116,23 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
return formatFile(fileSet, file, src, nil, opt)
}
// GetAllCandidates gets all of the standard library candidate packages to import in
// sorted order on import path.
func GetAllCandidates(filename string, opt *Options) (pkgs []ImportFix, err error) {
_, opt, err = initialize(filename, nil, opt)
// GetAllCandidates gets all of the packages starting with prefix that can be
// imported by filename, sorted by import path.
func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error {
_, opt, err := initialize(filename, []byte{}, opt)
if err != nil {
return nil, err
return err
}
return getAllCandidates(filename, opt.Env)
return getAllCandidates(ctx, callback, searchPrefix, filename, filePkg, opt.Env)
}
// GetPackageExports returns all known packages with name pkg and their exports.
func GetPackageExports(pkg, filename string, opt *Options) (exports []PackageExport, err error) {
_, opt, err = initialize(filename, nil, opt)
func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error {
_, opt, err := initialize(filename, []byte{}, opt)
if err != nil {
return nil, err
return err
}
return getPackageExports(pkg, filename, opt.Env)
return getPackageExports(ctx, callback, searchPkg, filename, filePkg, opt.Env)
}
// initialize sets the values for opt and src.
@ -145,8 +147,12 @@ func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, er
// Set the env if the user has not provided it.
if opt.Env == nil {
opt.Env = &ProcessEnv{
GOPATH: build.Default.GOPATH,
GOROOT: build.Default.GOROOT,
GOPATH: build.Default.GOPATH,
GOROOT: build.Default.GOROOT,
GOFLAGS: os.Getenv("GOFLAGS"),
GO111MODULE: os.Getenv("GO111MODULE"),
GOPROXY: os.Getenv("GOPROXY"),
GOSUMDB: os.Getenv("GOSUMDB"),
}
}
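A minimal sketch (not part of the diff) of driving the new callback-based GetAllCandidates from inside this package; suggestImports, the file name, and the search prefix are hypothetical, and the nil Env is filled in by initialize as shown above.

// Hypothetical caller of the new callback-based API.
func suggestImports(ctx context.Context) ([]ImportFix, error) {
    var fixes []ImportFix
    collect := func(fix ImportFix) { fixes = append(fixes, fix) }
    // Ask for every importable package whose name (or path suffix) starts
    // with "http", as seen from package main in handler.go.
    if err := GetAllCandidates(ctx, collect, "http", "handler.go", "main", &Options{}); err != nil {
        return nil, err
    }
    return fixes, nil
}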


@ -13,11 +13,10 @@ import (
"sort"
"strconv"
"strings"
"sync"
"golang.org/x/mod/module"
"golang.org/x/mod/semver"
"golang.org/x/tools/internal/gopathwalk"
"golang.org/x/tools/internal/module"
"golang.org/x/tools/internal/semver"
)
// ModuleResolver implements resolver for modules using the go command as little
@ -26,11 +25,14 @@ type ModuleResolver struct {
env *ProcessEnv
moduleCacheDir string
dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory.
roots []gopathwalk.Root
scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots.
scannedRoots map[gopathwalk.Root]bool
Initialized bool
Main *ModuleJSON
ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path...
ModsByDir []*ModuleJSON // ...or Dir.
initialized bool
main *ModuleJSON
modsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path...
modsByDir []*ModuleJSON // ...or Dir.
// moduleCacheCache stores information about the module cache.
moduleCacheCache *dirInfoCache
@ -41,13 +43,23 @@ type ModuleJSON struct {
Path string // module path
Replace *ModuleJSON // replaced by this module
Main bool // is this the main module?
Indirect bool // is this module only an indirect dependency of main module?
Dir string // directory holding files for this module, if any
GoMod string // path to go.mod file for this module, if any
GoVersion string // go version used in module
}
func newModuleResolver(e *ProcessEnv) *ModuleResolver {
r := &ModuleResolver{
env: e,
scanSema: make(chan struct{}, 1),
}
r.scanSema <- struct{}{}
return r
}
func (r *ModuleResolver) init() error {
if r.Initialized {
if r.initialized {
return nil
}
mainMod, vendorEnabled, err := vendorEnabled(r.env)
@ -58,13 +70,13 @@ func (r *ModuleResolver) init() error {
if mainMod != nil && vendorEnabled {
// Vendor mode is on, so all the non-Main modules are irrelevant,
// and we need to search /vendor for everything.
r.Main = mainMod
r.main = mainMod
r.dummyVendorMod = &ModuleJSON{
Path: "",
Dir: filepath.Join(mainMod.Dir, "vendor"),
}
r.ModsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod}
r.ModsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod}
r.modsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod}
r.modsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod}
} else {
// Vendor mode is off, so run go list -m ... to find everything.
r.initAllMods()
@ -72,35 +84,69 @@ func (r *ModuleResolver) init() error {
r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod")
sort.Slice(r.ModsByModPath, func(i, j int) bool {
sort.Slice(r.modsByModPath, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.ModsByModPath[x].Path, "/")
return strings.Count(r.modsByModPath[x].Path, "/")
}
return count(j) < count(i) // descending order
})
sort.Slice(r.ModsByDir, func(i, j int) bool {
sort.Slice(r.modsByDir, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.ModsByDir[x].Dir, "/")
return strings.Count(r.modsByDir[x].Dir, "/")
}
return count(j) < count(i) // descending order
})
r.roots = []gopathwalk.Root{
{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
}
if r.main != nil {
r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
}
if vendorEnabled {
r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther})
} else {
addDep := func(mod *ModuleJSON) {
if mod.Replace == nil {
// This is redundant with the cache, but we'll skip it cheaply enough.
r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache})
} else {
r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
}
}
// Walk dependent modules before scanning the full mod cache, direct deps first.
for _, mod := range r.modsByModPath {
if !mod.Indirect && !mod.Main {
addDep(mod)
}
}
for _, mod := range r.modsByModPath {
if mod.Indirect && !mod.Main {
addDep(mod)
}
}
r.roots = append(r.roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache})
}
r.scannedRoots = map[gopathwalk.Root]bool{}
if r.moduleCacheCache == nil {
r.moduleCacheCache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
}
}
if r.otherCache == nil {
r.otherCache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
}
}
r.Initialized = true
r.initialized = true
return nil
}
func (r *ModuleResolver) initAllMods() error {
stdout, err := r.env.invokeGo("list", "-m", "-json", "...")
stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-json", "...")
if err != nil {
return err
}
@ -116,27 +162,37 @@ func (r *ModuleResolver) initAllMods() error {
// Can't do anything with a module that's not downloaded.
continue
}
r.ModsByModPath = append(r.ModsByModPath, mod)
r.ModsByDir = append(r.ModsByDir, mod)
// golang/go#36193: the go command doesn't always clean paths.
mod.Dir = filepath.Clean(mod.Dir)
r.modsByModPath = append(r.modsByModPath, mod)
r.modsByDir = append(r.modsByDir, mod)
if mod.Main {
r.Main = mod
r.main = mod
}
}
return nil
}
func (r *ModuleResolver) ClearForNewScan() {
<-r.scanSema
r.scannedRoots = map[gopathwalk.Root]bool{}
r.otherCache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
}
r.scanSema <- struct{}{}
}
func (r *ModuleResolver) ClearForNewMod() {
env := r.env
<-r.scanSema
*r = ModuleResolver{
env: env,
env: r.env,
moduleCacheCache: r.moduleCacheCache,
otherCache: r.otherCache,
scanSema: r.scanSema,
}
r.init()
r.scanSema <- struct{}{}
}
// findPackage returns the module and directory that contains the package at
@ -144,7 +200,7 @@ func (r *ModuleResolver) ClearForNewMod() {
func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) {
// This can't find packages in the stdlib, but that's harmless for all
// the existing code paths.
for _, m := range r.ModsByModPath {
for _, m := range r.modsByModPath {
if !strings.HasPrefix(importPath, m.Path) {
continue
}
@ -211,7 +267,7 @@ func (r *ModuleResolver) cacheKeys() []string {
}
// cachePackageName caches the package name for a dir already in the cache.
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) {
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) {
if info.rootType == gopathwalk.RootModuleCache {
return r.moduleCacheCache.CachePackageName(info)
}
@ -238,7 +294,7 @@ func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON {
// - in /vendor/ in -mod=vendor mode.
// - nested module? Dunno.
// Rumor has it that replace targets cannot contain other replace targets.
for _, m := range r.ModsByDir {
for _, m := range r.modsByDir {
if !strings.HasPrefix(dir, m.Dir) {
continue
}
@ -333,41 +389,49 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (
return names, nil
}
func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) {
func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error {
if err := r.init(); err != nil {
return nil, err
return err
}
// Walk GOROOT, GOPATH/pkg/mod, and the main module.
roots := []gopathwalk.Root{
{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
}
if r.Main != nil {
roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule})
}
if r.dummyVendorMod != nil {
roots = append(roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther})
} else {
roots = append(roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache})
// Walk replace targets, just in case they're not in any of the above.
for _, mod := range r.ModsByModPath {
if mod.Replace != nil {
roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
}
processDir := func(info directoryPackageInfo) {
// Skip this directory if we were not able to get the package information successfully.
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
return
}
pkg, err := r.canonicalize(info)
if err != nil {
return
}
if !callback.dirFound(pkg) {
return
}
pkg.packageName, err = r.cachePackageName(info)
if err != nil {
return
}
if !callback.packageNameLoaded(pkg) {
return
}
_, exports, err := r.loadExports(ctx, pkg, false)
if err != nil {
return
}
callback.exportsLoaded(pkg, exports)
}
roots = filterRoots(roots, exclude)
// Start processing everything in the cache, and listen for the new stuff
// we discover in the walk below.
stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir)
defer stop1()
stop2 := r.otherCache.ScanAndListen(ctx, processDir)
defer stop2()
var result []*pkg
var mu sync.Mutex
// We assume cached directories have not changed. We can skip them and their
// children.
// We assume cached directories are fully cached, including all their
// children, and have not changed. We can skip them.
skip := func(root gopathwalk.Root, dir string) bool {
mu.Lock()
defer mu.Unlock()
info, ok := r.cacheLoad(dir)
if !ok {
return false
@ -379,44 +443,64 @@ func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk
return packageScanned
}
// Add anything new to the cache. We'll process everything in it below.
// Add anything new to the cache, and process it if we're still listening.
add := func(root gopathwalk.Root, dir string) {
mu.Lock()
defer mu.Unlock()
r.cacheStore(r.scanDirForPackage(root, dir))
}
gopathwalk.WalkSkip(roots, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true})
// Everything we already had, and everything new, is now in the cache.
for _, dir := range r.cacheKeys() {
info, ok := r.cacheLoad(dir)
if !ok {
continue
// r.roots and the callback are not necessarily safe to use in the
// goroutine below. Process them eagerly.
roots := filterRoots(r.roots, callback.rootFound)
// We can't cancel walks, because we need them to finish to have a usable
// cache. Instead, run them in a separate goroutine and detach.
scanDone := make(chan struct{})
go func() {
select {
case <-ctx.Done():
return
case <-r.scanSema:
}
defer func() { r.scanSema <- struct{}{} }()
// We have the lock on r.scannedRoots, and no other scans can run.
for _, root := range roots {
if ctx.Err() != nil {
return
}
// Skip this directory if we were not able to get the package information successfully.
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
continue
}
// If we want package names, make sure the cache has them.
if loadNames {
var err error
if info, err = r.cachePackageName(info); err != nil {
if r.scannedRoots[root] {
continue
}
gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true})
r.scannedRoots[root] = true
}
res, err := r.canonicalize(info)
if err != nil {
continue
}
result = append(result, res)
close(scanDone)
}()
select {
case <-ctx.Done():
case <-scanDone:
}
return nil
}
return result, nil
func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int {
if _, ok := stdlib[path]; ok {
return MaxRelevance
}
mod, _ := r.findPackage(path)
return modRelevance(mod)
}
func modRelevance(mod *ModuleJSON) int {
switch {
case mod == nil: // out of scope
return MaxRelevance - 4
case mod.Indirect:
return MaxRelevance - 3
case !mod.Main:
return MaxRelevance - 2
default:
return MaxRelevance - 1 // main module ties with stdlib
}
}
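A small illustration (not part of the diff) of the ordering modRelevance produces; relevanceTiers and the ModuleJSON literals are hypothetical.

// relevanceTiers lists the scores from most to least preferred.
func relevanceTiers() []int {
    return []int{
        modRelevance(&ModuleJSON{Main: true}),     // main module: MaxRelevance - 1
        modRelevance(&ModuleJSON{}),               // direct dependency: MaxRelevance - 2
        modRelevance(&ModuleJSON{Indirect: true}), // indirect dependency: MaxRelevance - 3
        modRelevance(nil),                         // not in the module graph: MaxRelevance - 4
    }
}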
// canonicalize gets the result of canonicalizing the packages using the results
@ -428,15 +512,14 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
importPathShort: info.nonCanonicalImportPath,
dir: info.dir,
packageName: path.Base(info.nonCanonicalImportPath),
relevance: 0,
relevance: MaxRelevance,
}, nil
}
importPath := info.nonCanonicalImportPath
relevance := 2
mod := r.findModuleByDir(info.dir)
// Check if the directory is underneath a module that's in scope.
if mod := r.findModuleByDir(info.dir); mod != nil {
relevance = 1
if mod != nil {
// It is. If dir is the target of a replace directive,
// our guessed import path is wrong. Use the real one.
if mod.Dir == info.dir {
@ -445,15 +528,16 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
dirInMod := info.dir[len(mod.Dir)+len("/"):]
importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod))
}
} else if info.needsReplace {
} else if !strings.HasPrefix(importPath, info.moduleName) {
// The module's name doesn't match the package's import path. It
// probably needs a replace directive we don't have.
return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir)
}
res := &pkg{
importPathShort: importPath,
dir: info.dir,
packageName: info.packageName, // may not be populated if the caller didn't ask for it
relevance: relevance,
relevance: modRelevance(mod),
}
// We may have discovered a package that has a different version
// in scope already. Canonicalize to that one if possible.
@ -463,14 +547,14 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
return res, nil
}
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) {
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
if err := r.init(); err != nil {
return "", nil, err
}
if info, ok := r.cacheLoad(pkg.dir); ok {
if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest {
return r.cacheExports(ctx, r.env, info)
}
return loadExportsFromFiles(ctx, r.env, pkg.dir)
return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest)
}
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
@ -488,7 +572,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
}
switch root.Type {
case gopathwalk.RootCurrentModule:
importPath = path.Join(r.Main.Path, filepath.ToSlash(subdir))
importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
case gopathwalk.RootModuleCache:
matches := modCacheRegexp.FindStringSubmatch(subdir)
if len(matches) == 0 {
@ -497,7 +581,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
err: fmt.Errorf("invalid module cache path: %v", subdir),
}
}
modPath, err := module.DecodePath(filepath.ToSlash(matches[1]))
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
if err != nil {
if r.env.Debug {
r.env.Logf("decoding module cache path %q: %v", subdir, err)
@ -516,7 +600,6 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
dir: dir,
rootType: root.Type,
nonCanonicalImportPath: importPath,
needsReplace: false,
moduleDir: modDir,
moduleName: modName,
}
@ -524,14 +607,6 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
// stdlib packages are always in scope, despite the confusing go.mod
return result
}
// Check that this package is not obviously impossible to import.
if !strings.HasPrefix(importPath, modName) {
// The module's declared path does not match
// its expected path. It probably needs a
// replace directive we don't have.
result.needsReplace = true
}
return result
}
@ -624,7 +699,7 @@ func getMainModuleAnd114(env *ProcessEnv) (*ModuleJSON, bool, error) {
{{.GoVersion}}
{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}
`
stdout, err := env.invokeGo("list", "-m", "-f", format)
stdout, err := env.invokeGo(context.TODO(), "list", "-m", "-f", format)
if err != nil {
return nil, false, nil
}


@ -49,10 +49,6 @@ type directoryPackageInfo struct {
// nonCanonicalImportPath is the package's expected import path. It may
// not actually be importable at that path.
nonCanonicalImportPath string
// needsReplace is true if the nonCanonicalImportPath does not match the
// module's declared path, making it impossible to import without a
// replace directive.
needsReplace bool
// Module-related information.
moduleDir string // The directory that is the module root of this dir.
@ -97,15 +93,86 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
type dirInfoCache struct {
mu sync.Mutex
// dirs stores information about packages in directories, keyed by absolute path.
dirs map[string]*directoryPackageInfo
dirs map[string]*directoryPackageInfo
listeners map[*int]cacheListener
}
type cacheListener func(directoryPackageInfo)
// ScanAndListen calls listener on all the items in the cache, and on anything
// newly added. The returned stop function waits for all in-flight callbacks to
// finish and blocks new ones.
func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
ctx, cancel := context.WithCancel(ctx)
// Flushing out all the callbacks is tricky without knowing how many there
// are going to be. Setting an arbitrary limit makes it much easier.
const maxInFlight = 10
sema := make(chan struct{}, maxInFlight)
for i := 0; i < maxInFlight; i++ {
sema <- struct{}{}
}
cookie := new(int) // A unique ID we can use for the listener.
// We can't hold mu while calling the listener.
d.mu.Lock()
var keys []string
for key := range d.dirs {
keys = append(keys, key)
}
d.listeners[cookie] = func(info directoryPackageInfo) {
select {
case <-ctx.Done():
return
case <-sema:
}
listener(info)
sema <- struct{}{}
}
d.mu.Unlock()
stop := func() {
cancel()
d.mu.Lock()
delete(d.listeners, cookie)
d.mu.Unlock()
for i := 0; i < maxInFlight; i++ {
<-sema
}
}
// Process the pre-existing keys.
for _, k := range keys {
select {
case <-ctx.Done():
return stop
default:
}
if v, ok := d.Load(k); ok {
listener(v)
}
}
return stop
}
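A minimal in-package sketch (not part of the diff) of the ScanAndListen pattern described above; listScannedDirs is a hypothetical helper that relies on the package's existing imports.

// listScannedDirs drains what is already cached, keeps listening while a walk
// elsewhere calls Store, then detaches via the returned stop function.
func listScannedDirs(ctx context.Context, cache *dirInfoCache) []string {
    var mu sync.Mutex
    var dirs []string
    stop := cache.ScanAndListen(ctx, func(info directoryPackageInfo) {
        mu.Lock()
        dirs = append(dirs, info.dir)
        mu.Unlock()
    })
    // ... a gopathwalk.Walk would run here, calling cache.Store on new dirs ...
    stop() // waits for in-flight callbacks before returning
    return dirs
}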
// Store stores the package info for dir.
func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
d.mu.Lock()
defer d.mu.Unlock()
stored := info // defensive copy
d.dirs[dir] = &stored
_, old := d.dirs[dir]
d.dirs[dir] = &info
var listeners []cacheListener
for _, l := range d.listeners {
listeners = append(listeners, l)
}
d.mu.Unlock()
if !old {
for _, l := range listeners {
l(info)
}
}
}
// Load returns a copy of the directoryPackageInfo for absolute directory dir.
@ -129,17 +196,17 @@ func (d *dirInfoCache) Keys() (keys []string) {
return keys
}
func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) {
func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
if loaded, err := info.reachedStatus(nameLoaded); loaded {
return info, err
return info.packageName, err
}
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
return info, fmt.Errorf("cannot read package name, scan error: %v", err)
return "", fmt.Errorf("cannot read package name, scan error: %v", err)
}
info.packageName, info.err = packageDirToName(info.dir)
info.status = nameLoaded
d.Store(info.dir, info)
return info, info.err
return info.packageName, info.err
}
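// --- Illustrative sketch, not part of this change ---
// With the new signature above, callers get just the package name back rather
// than a whole directoryPackageInfo. A sketch of the call shape, using Load as
// documented earlier in this file; the directory argument is illustrative and
// fmt is assumed to be imported.
func examplePackageName(cache *dirInfoCache, dir string) {
	info, ok := cache.Load(dir)
	if !ok {
		return // directory has not been scanned yet
	}
	name, err := cache.CachePackageName(info)
	if err != nil {
		return
	}
	fmt.Println("package", name, "in", dir)
}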
func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
@@ -149,8 +216,8 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d
if reached, err := info.reachedStatus(nameLoaded); reached && err != nil {
return "", nil, err
}
info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir)
if info.err == context.Canceled {
info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false)
if info.err == context.Canceled || info.err == context.DeadlineExceeded {
return info.packageName, info.exports, info.err
}
// The cache structure wants things to proceed linearly. We can skip a

View file

@@ -1,540 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package module defines the module.Version type
// along with support code.
package module
// IMPORTANT NOTE
//
// This file essentially defines the set of valid import paths for the go command.
// There are many subtle considerations, including Unicode ambiguity,
// security, network, and file system representations.
//
// This file also defines the set of valid module path and version combinations,
// another topic with many subtle considerations.
//
// Changes to the semantics in this file require approval from rsc.
import (
"fmt"
"sort"
"strings"
"unicode"
"unicode/utf8"
"golang.org/x/tools/internal/semver"
)
// A Version is defined by a module path and version pair.
type Version struct {
Path string
// Version is usually a semantic version in canonical form.
// There are two exceptions to this general rule.
// First, the top-level target of a build has no specific version
// and uses Version = "".
// Second, during MVS calculations the version "none" is used
// to represent the decision to take no version of a given module.
Version string `json:",omitempty"`
}
// Check checks that a given module path, version pair is valid.
// In addition to the path being a valid module path
// and the version being a valid semantic version,
// the two must correspond.
// For example, the path "yaml/v2" only corresponds to
// semantic versions beginning with "v2.".
func Check(path, version string) error {
if err := CheckPath(path); err != nil {
return err
}
if !semver.IsValid(version) {
return fmt.Errorf("malformed semantic version %v", version)
}
_, pathMajor, _ := SplitPathVersion(path)
if !MatchPathMajor(version, pathMajor) {
if pathMajor == "" {
pathMajor = "v0 or v1"
}
if pathMajor[0] == '.' { // .v1
pathMajor = pathMajor[1:]
}
return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor)
}
return nil
}
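// --- Illustrative sketch, not part of the original file ---
// A short worked example of what Check accepts and rejects, written as if it
// lived in a test in this package; the module paths are only illustrative and
// fmt is assumed to be imported.
func exampleCheck() {
	fmt.Println(Check("gopkg.in/yaml.v2", "v2.2.1"))           // <nil>: ".v2" matches a v2.x.y version
	fmt.Println(Check("github.com/go-yaml/yaml/v2", "v2.2.1")) // <nil>: "/v2" matches a v2.x.y version
	fmt.Println(Check("github.com/go-yaml/yaml", "v2.2.1"))    // error: no suffix means v0 or v1 only
}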
// firstPathOK reports whether r can appear in the first element of a module path.
// The first element of the path must be an LDH domain name, at least for now.
// To avoid case ambiguity, the domain name must be entirely lower case.
func firstPathOK(r rune) bool {
return r == '-' || r == '.' ||
'0' <= r && r <= '9' ||
'a' <= r && r <= 'z'
}
// pathOK reports whether r can appear in an import path element.
// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
// This matches what "go get" has historically recognized in import paths.
// TODO(rsc): We would like to allow Unicode letters, but that requires additional
// care in the safe encoding (see note below).
func pathOK(r rune) bool {
if r < utf8.RuneSelf {
return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
'0' <= r && r <= '9' ||
'A' <= r && r <= 'Z' ||
'a' <= r && r <= 'z'
}
return false
}
// fileNameOK reports whether r can appear in a file name.
// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
// If we expand the set of allowed characters here, we have to
// work harder at detecting potential case-folding and normalization collisions.
// See note about "safe encoding" below.
func fileNameOK(r rune) bool {
if r < utf8.RuneSelf {
// Entire set of ASCII punctuation, from which we remove characters:
// ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
// We disallow some shell special characters: " ' * < > ? ` |
// (Note that some of those are disallowed by the Windows file system as well.)
// We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
// We allow spaces (U+0020) in file names.
const allowed = "!#$%&()+,-.=@[]^_{}~ "
if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
return true
}
for i := 0; i < len(allowed); i++ {
if rune(allowed[i]) == r {
return true
}
}
return false
}
// It may be OK to add more ASCII punctuation here, but only carefully.
// For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
return unicode.IsLetter(r)
}
// CheckPath checks that a module path is valid.
func CheckPath(path string) error {
if err := checkPath(path, false); err != nil {
return fmt.Errorf("malformed module path %q: %v", path, err)
}
i := strings.Index(path, "/")
if i < 0 {
i = len(path)
}
if i == 0 {
return fmt.Errorf("malformed module path %q: leading slash", path)
}
if !strings.Contains(path[:i], ".") {
return fmt.Errorf("malformed module path %q: missing dot in first path element", path)
}
if path[0] == '-' {
return fmt.Errorf("malformed module path %q: leading dash in first path element", path)
}
for _, r := range path[:i] {
if !firstPathOK(r) {
return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r)
}
}
if _, _, ok := SplitPathVersion(path); !ok {
return fmt.Errorf("malformed module path %q: invalid version", path)
}
return nil
}
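// --- Illustrative sketch, not part of the original file ---
// A few concrete inputs showing the rules CheckPath enforces above; the paths
// are illustrative and fmt is assumed to be imported.
func exampleCheckPath() {
	fmt.Println(CheckPath("github.com/user/repo"))    // <nil>
	fmt.Println(CheckPath("localpkg/sub"))            // error: missing dot in first path element
	fmt.Println(CheckPath("GitHub.com/user/repo"))    // error: invalid char 'G' in first path element
	fmt.Println(CheckPath("github.com/user/repo/v1")) // error: invalid version (v1 takes no suffix)
}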
// CheckImportPath checks that an import path is valid.
func CheckImportPath(path string) error {
if err := checkPath(path, false); err != nil {
return fmt.Errorf("malformed import path %q: %v", path, err)
}
return nil
}
// checkPath checks that a general path is valid.
// It returns an error describing why but not mentioning path.
// Because these checks apply to both module paths and import paths,
// the caller is expected to add the "malformed ___ path %q: " prefix.
// fileName indicates whether the final element of the path is a file name
// (as opposed to a directory name).
func checkPath(path string, fileName bool) error {
if !utf8.ValidString(path) {
return fmt.Errorf("invalid UTF-8")
}
if path == "" {
return fmt.Errorf("empty string")
}
if strings.Contains(path, "..") {
return fmt.Errorf("double dot")
}
if strings.Contains(path, "//") {
return fmt.Errorf("double slash")
}
if path[len(path)-1] == '/' {
return fmt.Errorf("trailing slash")
}
elemStart := 0
for i, r := range path {
if r == '/' {
if err := checkElem(path[elemStart:i], fileName); err != nil {
return err
}
elemStart = i + 1
}
}
if err := checkElem(path[elemStart:], fileName); err != nil {
return err
}
return nil
}
// checkElem checks whether an individual path element is valid.
// fileName indicates whether the element is a file name (not a directory name).
func checkElem(elem string, fileName bool) error {
if elem == "" {
return fmt.Errorf("empty path element")
}
if strings.Count(elem, ".") == len(elem) {
return fmt.Errorf("invalid path element %q", elem)
}
if elem[0] == '.' && !fileName {
return fmt.Errorf("leading dot in path element")
}
if elem[len(elem)-1] == '.' {
return fmt.Errorf("trailing dot in path element")
}
charOK := pathOK
if fileName {
charOK = fileNameOK
}
for _, r := range elem {
if !charOK(r) {
return fmt.Errorf("invalid char %q", r)
}
}
// Windows disallows a bunch of path elements, sadly.
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
short := elem
if i := strings.Index(short, "."); i >= 0 {
short = short[:i]
}
for _, bad := range badWindowsNames {
if strings.EqualFold(bad, short) {
return fmt.Errorf("disallowed path element %q", elem)
}
}
return nil
}
// CheckFilePath checks whether a slash-separated file path is valid.
func CheckFilePath(path string) error {
if err := checkPath(path, true); err != nil {
return fmt.Errorf("malformed file path %q: %v", path, err)
}
return nil
}
// badWindowsNames are the reserved file path elements on Windows.
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
var badWindowsNames = []string{
"CON",
"PRN",
"AUX",
"NUL",
"COM1",
"COM2",
"COM3",
"COM4",
"COM5",
"COM6",
"COM7",
"COM8",
"COM9",
"LPT1",
"LPT2",
"LPT3",
"LPT4",
"LPT5",
"LPT6",
"LPT7",
"LPT8",
"LPT9",
}
// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
// and version is either empty or "/vN" for N >= 2.
// As a special case, gopkg.in paths are recognized directly;
// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
if strings.HasPrefix(path, "gopkg.in/") {
return splitGopkgIn(path)
}
i := len(path)
dot := false
for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
if path[i-1] == '.' {
dot = true
}
i--
}
if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' {
return path, "", true
}
prefix, pathMajor = path[:i-2], path[i-2:]
if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
return path, "", false
}
return prefix, pathMajor, true
}
// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths.
func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
if !strings.HasPrefix(path, "gopkg.in/") {
return path, "", false
}
i := len(path)
if strings.HasSuffix(path, "-unstable") {
i -= len("-unstable")
}
for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') {
i--
}
if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' {
// All gopkg.in paths must end in vN for some N.
return path, "", false
}
prefix, pathMajor = path[:i-2], path[i-2:]
if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" {
return path, "", false
}
return prefix, pathMajor, true
}
// MatchPathMajor reports whether the semantic version v
// matches the path major version pathMajor.
func MatchPathMajor(v, pathMajor string) bool {
if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
}
if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
// Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1.
// For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
return true
}
m := semver.Major(v)
if pathMajor == "" {
return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible"
}
return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:]
}
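// --- Illustrative sketch, not part of the original file ---
// How SplitPathVersion and MatchPathMajor combine; the paths and versions are
// illustrative and fmt is assumed to be imported.
func exampleMajorMatch() {
	prefix, pathMajor, ok := SplitPathVersion("github.com/user/repo/v3")
	fmt.Println(prefix, pathMajor, ok)               // github.com/user/repo /v3 true
	fmt.Println(MatchPathMajor("v3.1.0", pathMajor)) // true
	fmt.Println(MatchPathMajor("v2.0.0", pathMajor)) // false

	_, gp, _ := SplitPathVersion("gopkg.in/yaml.v2")
	fmt.Println(MatchPathMajor("v2.2.1", gp)) // true (gopkg.in uses ".v2")
}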
// CanonicalVersion returns the canonical form of the version string v.
// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
func CanonicalVersion(v string) string {
cv := semver.Canonical(v)
if semver.Build(v) == "+incompatible" {
cv += "+incompatible"
}
return cv
}
// Sort sorts the list by Path, breaking ties by comparing Versions.
func Sort(list []Version) {
sort.Slice(list, func(i, j int) bool {
mi := list[i]
mj := list[j]
if mi.Path != mj.Path {
return mi.Path < mj.Path
}
// To help go.sum formatting, allow version/file.
// Compare semver prefix by semver rules,
// file by string order.
vi := mi.Version
vj := mj.Version
var fi, fj string
if k := strings.Index(vi, "/"); k >= 0 {
vi, fi = vi[:k], vi[k:]
}
if k := strings.Index(vj, "/"); k >= 0 {
vj, fj = vj[:k], vj[k:]
}
if vi != vj {
return semver.Compare(vi, vj) < 0
}
return fi < fj
})
}
// Safe encodings
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the safe encoding be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces)
// in web server logs, and so on. Instead, we want a safe encoding that
// leaves most paths unaltered.
//
// The safe encoding is this:
// replace every uppercase letter with an exclamation mark
// followed by the letter's lowercase equivalent.
//
// For example,
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to encode a literal !.
//
// Although paths are disallowed from using Unicode (see pathOK above),
// the eventual plan is to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention. Note however that not all runes that
// are different but case-fold equivalent are an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
// are considered to case-fold to each other. When we do add Unicode
// letters, we must not assume that upper/lower are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
//
// Also, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.
// EncodePath returns the safe encoding of the given module path.
// It fails if the module path is invalid.
func EncodePath(path string) (encoding string, err error) {
if err := CheckPath(path); err != nil {
return "", err
}
return encodeString(path)
}
// EncodeVersion returns the safe encoding of the given module version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func EncodeVersion(v string) (encoding string, err error) {
if err := checkElem(v, true); err != nil || strings.Contains(v, "!") {
return "", fmt.Errorf("disallowed version string %q", v)
}
return encodeString(v)
}
func encodeString(s string) (encoding string, err error) {
haveUpper := false
for _, r := range s {
if r == '!' || r >= utf8.RuneSelf {
// This should be disallowed by CheckPath, but diagnose anyway.
// The correctness of the encoding loop below depends on it.
return "", fmt.Errorf("internal error: inconsistency in EncodePath")
}
if 'A' <= r && r <= 'Z' {
haveUpper = true
}
}
if !haveUpper {
return s, nil
}
var buf []byte
for _, r := range s {
if 'A' <= r && r <= 'Z' {
buf = append(buf, '!', byte(r+'a'-'A'))
} else {
buf = append(buf, byte(r))
}
}
return string(buf), nil
}
// DecodePath returns the module path of the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid path.
func DecodePath(encoding string) (path string, err error) {
path, ok := decodeString(encoding)
if !ok {
return "", fmt.Errorf("invalid module path encoding %q", encoding)
}
if err := CheckPath(path); err != nil {
return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err)
}
return path, nil
}
// DecodeVersion returns the version string for the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func DecodeVersion(encoding string) (v string, err error) {
v, ok := decodeString(encoding)
if !ok {
return "", fmt.Errorf("invalid version encoding %q", encoding)
}
if err := checkElem(v, true); err != nil {
return "", fmt.Errorf("disallowed version string %q", v)
}
return v, nil
}
func decodeString(encoding string) (string, bool) {
var buf []byte
bang := false
for _, r := range encoding {
if r >= utf8.RuneSelf {
return "", false
}
if bang {
bang = false
if r < 'a' || 'z' < r {
return "", false
}
buf = append(buf, byte(r+'A'-'a'))
continue
}
if r == '!' {
bang = true
continue
}
if 'A' <= r && r <= 'Z' {
return "", false
}
buf = append(buf, byte(r))
}
if bang {
return "", false
}
return string(buf), true
}
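// --- Illustrative sketch, not part of the original file ---
// A round trip through the safe encoding described above; the inputs are
// illustrative and fmt is assumed to be imported.
func exampleSafeEncoding() {
	enc, err := EncodePath("github.com/Azure/azure-sdk-for-go")
	fmt.Println(enc, err) // github.com/!azure/azure-sdk-for-go <nil>

	dec, err := DecodePath("github.com/!sirupsen/logrus")
	fmt.Println(dec, err) // github.com/Sirupsen/logrus <nil>

	_, err = DecodePath("github.com/!1bad") // "!" must be followed by a lower-case letter
	fmt.Println(err != nil)                 // true
}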

View file

@@ -0,0 +1,27 @@
// Package packagesinternal exposes internal-only fields from go/packages.
package packagesinternal
import "time"
// Fields must match go list;
type Module struct {
Path string // module path
Version string // module version
Versions []string // available module versions (with -versions)
Replace *Module // replaced by this module
Time *time.Time // time version was created
Update *Module // available update, if any (with -u)
Main bool // is this the main module?
Indirect bool // is this module only an indirect dependency of main module?
Dir string // directory holding files for this module, if any
GoMod string // path to go.mod file used when loading this module, if any
GoVersion string // go version used in module
Error *ModuleError // error loading module
}
type ModuleError struct {
Err string // the error itself
}
var GetForTest = func(p interface{}) string { return "" }
var GetModule = func(p interface{}) *Module { return nil }
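// --- Illustrative sketch, not part of the new file ---
// The Module struct above mirrors the JSON that `go list -m -json` prints, so a
// record can be decoded straight into it. The helper below is an assumption for
// illustration only; it would need encoding/json and os/exec to be imported.
func loadMainModule() (*Module, error) {
	out, err := exec.Command("go", "list", "-m", "-json").Output()
	if err != nil {
		return nil, err
	}
	var m Module
	if err := json.Unmarshal(out, &m); err != nil {
		return nil, err
	}
	return &m, nil // m.Path, m.Dir, m.GoMod, m.GoVersion are now populated
}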

View file

@@ -1,388 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package semver implements comparison of semantic version strings.
// In this package, semantic version strings must begin with a leading "v",
// as in "v1.0.0".
//
// The general form of a semantic version string accepted by this package is
//
// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
//
// where square brackets indicate optional parts of the syntax;
// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
// using only alphanumeric characters and hyphens; and
// all-numeric PRERELEASE identifiers must not have leading zeros.
//
// This package follows Semantic Versioning 2.0.0 (see semver.org)
// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
package semver
// parsed returns the parsed form of a semantic version string.
type parsed struct {
major string
minor string
patch string
short string
prerelease string
build string
err string
}
// IsValid reports whether v is a valid semantic version string.
func IsValid(v string) bool {
_, ok := parse(v)
return ok
}
// Canonical returns the canonical formatting of the semantic version v.
// It fills in any missing .MINOR or .PATCH and discards build metadata.
// Two semantic versions compare equal only if their canonical formattings
// are identical strings.
// The canonical invalid semantic version is the empty string.
func Canonical(v string) string {
p, ok := parse(v)
if !ok {
return ""
}
if p.build != "" {
return v[:len(v)-len(p.build)]
}
if p.short != "" {
return v + p.short
}
return v
}
// Major returns the major version prefix of the semantic version v.
// For example, Major("v2.1.0") == "v2".
// If v is an invalid semantic version string, Major returns the empty string.
func Major(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return v[:1+len(pv.major)]
}
// MajorMinor returns the major.minor version prefix of the semantic version v.
// For example, MajorMinor("v2.1.0") == "v2.1".
// If v is an invalid semantic version string, MajorMinor returns the empty string.
func MajorMinor(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
i := 1 + len(pv.major)
if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
return v[:j]
}
return v[:i] + "." + pv.minor
}
// Prerelease returns the prerelease suffix of the semantic version v.
// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
// If v is an invalid semantic version string, Prerelease returns the empty string.
func Prerelease(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return pv.prerelease
}
// Build returns the build suffix of the semantic version v.
// For example, Build("v2.1.0+meta") == "+meta".
// If v is an invalid semantic version string, Build returns the empty string.
func Build(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return pv.build
}
// Compare returns an integer comparing two versions
// according to semantic version precedence.
// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
//
// An invalid semantic version string is considered less than a valid one.
// All invalid semantic version strings compare equal to each other.
func Compare(v, w string) int {
pv, ok1 := parse(v)
pw, ok2 := parse(w)
if !ok1 && !ok2 {
return 0
}
if !ok1 {
return -1
}
if !ok2 {
return +1
}
if c := compareInt(pv.major, pw.major); c != 0 {
return c
}
if c := compareInt(pv.minor, pw.minor); c != 0 {
return c
}
if c := compareInt(pv.patch, pw.patch); c != 0 {
return c
}
return comparePrerelease(pv.prerelease, pw.prerelease)
}
// Max canonicalizes its arguments and then returns the version string
// that compares greater.
func Max(v, w string) string {
v = Canonical(v)
w = Canonical(w)
if Compare(v, w) > 0 {
return v
}
return w
}
func parse(v string) (p parsed, ok bool) {
if v == "" || v[0] != 'v' {
p.err = "missing v prefix"
return
}
p.major, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad major version"
return
}
if v == "" {
p.minor = "0"
p.patch = "0"
p.short = ".0.0"
return
}
if v[0] != '.' {
p.err = "bad minor prefix"
ok = false
return
}
p.minor, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad minor version"
return
}
if v == "" {
p.patch = "0"
p.short = ".0"
return
}
if v[0] != '.' {
p.err = "bad patch prefix"
ok = false
return
}
p.patch, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad patch version"
return
}
if len(v) > 0 && v[0] == '-' {
p.prerelease, v, ok = parsePrerelease(v)
if !ok {
p.err = "bad prerelease"
return
}
}
if len(v) > 0 && v[0] == '+' {
p.build, v, ok = parseBuild(v)
if !ok {
p.err = "bad build"
return
}
}
if v != "" {
p.err = "junk on end"
ok = false
return
}
ok = true
return
}
func parseInt(v string) (t, rest string, ok bool) {
if v == "" {
return
}
if v[0] < '0' || '9' < v[0] {
return
}
i := 1
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
if v[0] == '0' && i != 1 {
return
}
return v[:i], v[i:], true
}
func parsePrerelease(v string) (t, rest string, ok bool) {
// "A pre-release version MAY be denoted by appending a hyphen and
// a series of dot separated identifiers immediately following the patch version.
// Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
// Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
if v == "" || v[0] != '-' {
return
}
i := 1
start := 1
for i < len(v) && v[i] != '+' {
if !isIdentChar(v[i]) && v[i] != '.' {
return
}
if v[i] == '.' {
if start == i || isBadNum(v[start:i]) {
return
}
start = i + 1
}
i++
}
if start == i || isBadNum(v[start:i]) {
return
}
return v[:i], v[i:], true
}
func parseBuild(v string) (t, rest string, ok bool) {
if v == "" || v[0] != '+' {
return
}
i := 1
start := 1
for i < len(v) {
if !isIdentChar(v[i]) {
return
}
if v[i] == '.' {
if start == i {
return
}
start = i + 1
}
i++
}
if start == i {
return
}
return v[:i], v[i:], true
}
func isIdentChar(c byte) bool {
return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
}
func isBadNum(v string) bool {
i := 0
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
return i == len(v) && i > 1 && v[0] == '0'
}
func isNum(v string) bool {
i := 0
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
return i == len(v)
}
func compareInt(x, y string) int {
if x == y {
return 0
}
if len(x) < len(y) {
return -1
}
if len(x) > len(y) {
return +1
}
if x < y {
return -1
} else {
return +1
}
}
func comparePrerelease(x, y string) int {
// "When major, minor, and patch are equal, a pre-release version has
// lower precedence than a normal version.
// Example: 1.0.0-alpha < 1.0.0.
// Precedence for two pre-release versions with the same major, minor,
// and patch version MUST be determined by comparing each dot separated
// identifier from left to right until a difference is found as follows:
// identifiers consisting of only digits are compared numerically and
// identifiers with letters or hyphens are compared lexically in ASCII
// sort order. Numeric identifiers always have lower precedence than
// non-numeric identifiers. A larger set of pre-release fields has a
// higher precedence than a smaller set, if all of the preceding
// identifiers are equal.
// Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
// 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
if x == y {
return 0
}
if x == "" {
return +1
}
if y == "" {
return -1
}
for x != "" && y != "" {
x = x[1:] // skip - or .
y = y[1:] // skip - or .
var dx, dy string
dx, x = nextIdent(x)
dy, y = nextIdent(y)
if dx != dy {
ix := isNum(dx)
iy := isNum(dy)
if ix != iy {
if ix {
return -1
} else {
return +1
}
}
if ix {
if len(dx) < len(dy) {
return -1
}
if len(dx) > len(dy) {
return +1
}
}
if dx < dy {
return -1
} else {
return +1
}
}
}
if x == "" {
return -1
} else {
return +1
}
}
func nextIdent(x string) (dx, rest string) {
i := 0
for i < len(x) && x[i] != '.' {
i++
}
return x[:i], x[i:]
}
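// --- Illustrative sketch, not part of the original file ---
// The comparison semantics implemented above, in a few concrete calls; written
// as if in a test in this package, with fmt assumed to be imported.
func exampleSemver() {
	fmt.Println(Canonical("v1.2"))                 // v1.2.0 (missing .PATCH filled in)
	fmt.Println(Compare("v1.2.3", "v1.2"))         // 1
	fmt.Println(Compare("v1.0.0-alpha", "v1.0.0")) // -1 (prerelease sorts before release)
	fmt.Println(Compare("v1.0.0+build", "v1.0.0")) // 0 (build metadata is ignored)
	fmt.Println(Max("v1.9.0", "v1.10.0"))          // v1.10.0
}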