Bump github.com/onsi/ginkgo/v2 from 2.22.0 to 2.22.2

Bumps [github.com/onsi/ginkgo/v2](https://github.com/onsi/ginkgo) from 2.22.0 to 2.22.2.
- [Release notes](https://github.com/onsi/ginkgo/releases)
- [Changelog](https://github.com/onsi/ginkgo/blob/master/CHANGELOG.md)
- [Commits](https://github.com/onsi/ginkgo/compare/v2.22.0...v2.22.2)

---
updated-dependencies:
- dependency-name: github.com/onsi/ginkgo/v2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Commit 389b946514 (parent 2462b80d95)
Author: dependabot[bot]
Committed via GitHub: 2025-01-21 14:59:20 +00:00
43 changed files with 2335 additions and 609 deletions

go.mod

@@ -62,8 +62,8 @@ require (
github.com/oklog/run v1.1.0
github.com/olekukonko/tablewriter v0.0.5
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.22.0
github.com/onsi/gomega v1.36.0
github.com/onsi/ginkgo/v2 v2.22.2
github.com/onsi/gomega v1.36.2
github.com/open-policy-agent/opa v0.70.0
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250121094357-24f23b6a27ed
github.com/orcaman/concurrent-map v1.0.0
@@ -224,7 +224,7 @@ require (
github.com/gomodule/redigo v1.9.2 // indirect
github.com/google/flatbuffers v2.0.8+incompatible // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
github.com/google/renameio/v2 v2.0.0 // indirect
github.com/gookit/color v1.5.4 // indirect
github.com/gookit/goutil v0.6.15 // indirect
@@ -327,10 +327,10 @@ require (
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.23.0 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/sys v0.29.0 // indirect
golang.org/x/time v0.8.0 // indirect
golang.org/x/tools v0.26.0 // indirect
golang.org/x/tools v0.28.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect

go.sum

@@ -549,8 +549,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg=
github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4=
@@ -862,13 +862,13 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.36.0 h1:Pb12RlruUtj4XUuPUqeEWc6j5DkVVVA49Uf6YLfC95Y=
github.com/onsi/gomega v1.36.0/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/open-policy-agent/opa v0.70.0 h1:B3cqCN2iQAyKxK6+GI+N40uqkin+wzIrM7YA60t9x1U=
github.com/open-policy-agent/opa v0.70.0/go.mod h1:Y/nm5NY0BX0BqjBriKUiV81sCl8XOjjvqQG7dXrggtI=
github.com/opencloud-eu/reva/v2 v2.27.3-0.20250121094357-24f23b6a27ed h1:0rVMOlcGXgFRNrNgjeCGTyuGKXwD+Y+wQGzm/uVe3CU=
@@ -1273,8 +1273,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1531,8 +1531,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210112230658-8b4aab62c064/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=


@@ -4,4 +4,5 @@ tmp/**/*
*.coverprofile
.vscode
.idea/
*.log
*.log
*.test


@@ -1,3 +1,23 @@
## 2.22.2
### Maintenance
- Bump github.com/onsi/gomega from 1.36.1 to 1.36.2 (#1499) [cc553ce]
- Bump golang.org/x/crypto (#1498) [2170370]
- Bump golang.org/x/net from 0.32.0 to 0.33.0 (#1496) [a96c44f]
## 2.22.1
### Fixes
Fix CSV encoding
- Update tests [aab3da6]
- Properly encode CSV rows [c09df39]
- Add test case for proper csv escaping [96a80fc]
- Add meta-test [43dad69]
### Maintenance
- ensure *.test files are gitignored so we don't accidentally commit compiled tests again [c88c634]
- remove golang.org/x/net/context in favour of stdlib context [4df44bf]
## 2.22.0
### Features
@@ -1022,7 +1042,7 @@ New Features:
- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
- `ginkgo --failFast` aborts the test suite after the first failure.
- `ginkgo generate file_1 file_2` can take multiple file arguments.
- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
Improvements:
@@ -1056,7 +1076,7 @@ Bug Fixes:
Breaking changes:
- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
- Modified the Reporter interface
- Modified the Reporter interface
- `watch` is now a subcommand, not a flag.
DSL changes:


@@ -1,10 +1,13 @@
package outline
import (
"bytes"
"encoding/csv"
"encoding/json"
"fmt"
"go/ast"
"go/token"
"strconv"
"strings"
"golang.org/x/tools/go/ast/inspector"
@@ -84,9 +87,11 @@ func (o *outline) String() string {
// StringIndent returns a CSV-formatted outline, but every line is indented by
// one 'width' of spaces for every level of nesting.
func (o *outline) StringIndent(width int) string {
var b strings.Builder
var b bytes.Buffer
b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n")
csvWriter := csv.NewWriter(&b)
currentIndent := 0
pre := func(n *ginkgoNode) {
b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
@@ -96,8 +101,22 @@ func (o *outline) StringIndent(width int) string {
} else {
labels = strings.Join(n.Labels, ", ")
}
//enclosing labels in a double-quoted comma-separated list so that when imported into a CSV app the Labels column has comma-separated strings
b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels))
row := []string{
n.Name,
n.Text,
strconv.Itoa(n.Start),
strconv.Itoa(n.End),
strconv.FormatBool(n.Spec),
strconv.FormatBool(n.Focused),
strconv.FormatBool(n.Pending),
labels,
}
csvWriter.Write(row)
// Ensure we write to `b' before the next `b.WriteString()', which might be adding indentation
csvWriter.Flush()
currentIndent += width
}
post := func(n *ginkgoNode) {
@@ -106,5 +125,6 @@ func (o *outline) StringIndent(width int) string {
for _, n := range o.Nodes {
n.Walk(pre, post)
}
return b.String()
}
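
The hunk above replaces the hand-built fmt.Sprintf row with encoding/csv, which is what the 2.22.1 changelog entry "Properly encode CSV rows" refers to. As a standalone, hedged illustration (not part of the diff), the sketch below shows the escaping behaviour csv.Writer provides that the old string formatting did not:

```go
// Standalone sketch, not part of the diff: encoding/csv quotes and escapes
// fields containing commas, quotes, or newlines, which the old hand-built
// fmt.Sprintf row did not.
package main

import (
	"encoding/csv"
	"log"
	"os"
)

func main() {
	w := csv.NewWriter(os.Stdout)
	// A label list with a comma and an embedded quote would have corrupted
	// the old "%s,%s,..." row; csv.Writer emits:
	//   It,"labels: ""slow"", serial",12,34
	if err := w.Write([]string{"It", `labels: "slow", serial`, "12", "34"}); err != nil {
		log.Fatal(err)
	}
	w.Flush()
	if err := w.Error(); err != nil {
		log.Fatal(err)
	}
}
```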


@@ -1,6 +1,7 @@
package internal
import (
"context"
"fmt"
"sync"
"time"
@@ -9,7 +10,6 @@ import (
"github.com/onsi/ginkgo/v2/internal/parallel_support"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
"golang.org/x/net/context"
)
type Phase uint
@@ -20,7 +20,7 @@ const (
PhaseRun
)
var PROGRESS_REPORTER_DEADLING = 5 * time.Second
const ProgressReporterDeadline = 5 * time.Second
type Suite struct {
tree *TreeNode
@@ -370,7 +370,7 @@ func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport
suite.selectiveLock.Lock()
defer suite.selectiveLock.Unlock()
deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLING)
deadline, cancel := context.WithTimeout(context.Background(), ProgressReporterDeadline)
defer cancel()
var additionalReports []string
if suite.currentSpecContext != nil {
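
The hunk above drops the golang.org/x/net/context import in favour of the standard library and replaces the PROGRESS_REPORTER_DEADLING variable with a typed constant. A minimal standalone sketch of the resulting pattern (the lowercase constant name here is illustrative, not Ginkgo's exported identifier):

```go
// Minimal standalone sketch of the pattern above: a typed deadline constant
// and the standard library context package instead of golang.org/x/net/context.
package main

import (
	"context"
	"fmt"
	"time"
)

const progressReporterDeadline = 5 * time.Second // illustrative, mirrors the constant in the diff

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), progressReporterDeadline)
	defer cancel()

	select {
	case <-time.After(10 * time.Second): // a progress report that takes too long
		fmt.Println("report collected")
	case <-ctx.Done():
		fmt.Println("gave up:", ctx.Err()) // context deadline exceeded after 5s
	}
}
```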


@@ -1,3 +1,3 @@
package types
const VERSION = "2.22.0"
const VERSION = "2.22.2"


@@ -1,3 +1,17 @@
## 1.36.2
### Maintenance
- Bump google.golang.org/protobuf from 1.35.1 to 1.36.1 (#810) [9a7609d]
- Bump golang.org/x/net from 0.30.0 to 0.33.0 (#807) [b6cb028]
- Bump github.com/onsi/ginkgo/v2 from 2.20.1 to 2.22.1 (#808) [5756529]
- Bump nokogiri from 1.16.3 to 1.16.5 in /docs (#757) [dabc12e]
## 1.36.1
### Fixes
- Fix https://github.com/onsi/gomega/issues/803 [1c6c112]
- resolves onsi/gomega#696: make HaveField great on pointer receivers given only a non-addressable value [4feb9d7]
## 1.36.0
### Features


@@ -22,7 +22,7 @@ import (
"github.com/onsi/gomega/types"
)
const GOMEGA_VERSION = "1.36.0"
const GOMEGA_VERSION = "1.36.2"
const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().


@@ -40,7 +40,12 @@ func extractField(actual interface{}, field string, matchername string) (any, er
extractedValue = actualValue.Addr().MethodByName(strings.TrimSuffix(fields[0], "()"))
}
if extractedValue == (reflect.Value{}) {
return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual))
ptr := reflect.New(actualValue.Type())
ptr.Elem().Set(actualValue)
extractedValue = ptr.MethodByName(strings.TrimSuffix(fields[0], "()"))
if extractedValue == (reflect.Value{}) {
return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual))
}
}
t := extractedValue.Type()
if t.NumIn() != 0 || t.NumOut() != 1 {
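
The hunk above makes extractField fall back to an addressable copy before giving up, so HaveField can invoke pointer-receiver methods on values that are not themselves addressable (onsi/gomega#696). A hedged usage sketch, with a hypothetical Book type:

```go
// Hedged usage sketch; the Book type is hypothetical. Before this fix,
// HaveField("Title()", ...) failed when the actual value was not addressable
// (for example an element read out of a map), because pointer-receiver
// methods were not reachable via reflection on such values.
package main

import (
	"fmt"

	"github.com/onsi/gomega"
)

type Book struct{ Name string }

// Title has a pointer receiver, so it is not in the method set of a plain Book value.
func (b *Book) Title() string { return b.Name }

func main() {
	g := gomega.NewGomega(func(message string, _ ...int) {
		fmt.Println("FAIL:", message)
	})

	books := map[string]Book{"dune": {Name: "Dune"}} // map elements are not addressable
	g.Expect(books["dune"]).To(gomega.HaveField("Title()", "Dune"))
	fmt.Println("ok")
}
```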


@@ -1,39 +0,0 @@
// Package semver is a thin forwarding layer on top of
// [golang.org/x/mod/semver]. See that package for documentation.
//
// Deprecated: use [golang.org/x/mod/semver] instead.
package semver
import "golang.org/x/mod/semver"
func IsValid(v string) bool {
return semver.IsValid(v)
}
func Canonical(v string) string {
return semver.Canonical(v)
}
func Major(v string) string {
return semver.Major(v)
}
func MajorMinor(v string) string {
return semver.MajorMinor(v)
}
func Prerelease(v string) string {
return semver.Prerelease(v)
}
func Build(v string) string {
return semver.Build(v)
}
func Compare(v, w string) int {
return semver.Compare(v, w)
}
func Max(v, w string) string {
return semver.Max(v, w)
}
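
The file removed above was only a deprecated forwarding layer; its doc comment pointed callers at golang.org/x/mod/semver, which the updated go.mod now pins at v0.22.0. A minimal hedged sketch of calling that package directly:

```go
// Hedged sketch of the replacement the deprecation notice points to:
// calling golang.org/x/mod/semver directly.
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(semver.IsValid("v2.22.2"))            // true
	fmt.Println(semver.Compare("v2.22.0", "v2.22.2")) // -1 (v2.22.0 < v2.22.2)
	fmt.Println(semver.MajorMinor("v2.22.2"))         // v2.22
}
```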


@@ -344,7 +344,12 @@ func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (r
}
// UsesImport reports whether a given import is used.
// The provided File must have been parsed with syntactic object resolution
// (not using go/parser.SkipObjectResolution).
func UsesImport(f *ast.File, path string) (used bool) {
if f.Scope == nil {
panic("file f was not parsed with syntactic object resolution")
}
spec := importSpec(f, path)
if spec == nil {
return
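
The hunk above documents, and now enforces with a panic, that UsesImport relies on syntactic object resolution. A hedged standalone sketch of a call that satisfies that precondition, i.e. parsing without parser.SkipObjectResolution:

```go
// Hedged standalone sketch: the file is parsed without
// parser.SkipObjectResolution, so f.Scope is populated and
// astutil.UsesImport does not panic.
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"log"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	src := `package p

import "fmt"

var _ = fmt.Sprint
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0) // mode 0: object resolution enabled
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(astutil.UsesImport(f, "fmt")) // true
}
```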


@@ -180,7 +180,9 @@ func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, s
// traverse builds the table of events representing a traversal.
func traverse(files []*ast.File) []event {
// Preallocate approximate number of events
// based on source file extent.
// based on source file extent of the declarations.
// (We use End-Pos not FileStart-FileEnd to neglect
// the effect of long doc comments.)
// This makes traverse faster by 4x (!).
var extent int
for _, f := range files {


@@ -2,22 +2,64 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gcexportdata provides functions for locating, reading, and
// writing export data files containing type information produced by the
// gc compiler. This package supports go1.7 export data format and all
// later versions.
// Package gcexportdata provides functions for reading and writing
// export data, which is a serialized description of the API of a Go
// package including the names, kinds, types, and locations of all
// exported declarations.
//
// Although it might seem convenient for this package to live alongside
// go/types in the standard library, this would cause version skew
// problems for developer tools that use it, since they must be able to
// consume the outputs of the gc compiler both before and after a Go
// update such as from Go 1.7 to Go 1.8. Because this package lives in
// golang.org/x/tools, sites can update their version of this repo some
// time before the Go 1.8 release and rebuild and redeploy their
// developer tools, which will then be able to consume both Go 1.7 and
// Go 1.8 export data files, so they will work before and after the
// Go update. (See discussion at https://golang.org/issue/15651.)
package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
// The standard Go compiler (cmd/compile) writes an export data file
// for each package it compiles, which it later reads when compiling
// packages that import the earlier one. The compiler must thus
// contain logic to both write and read export data.
// (See the "Export" section in the cmd/compile/README file.)
//
// The [Read] function in this package can read files produced by the
// compiler, producing [go/types] data structures. As a matter of
// policy, Read supports export data files produced by only the last
// two Go releases plus tip; see https://go.dev/issue/68898. The
// export data files produced by the compiler contain additional
// details related to generics, inlining, and other optimizations that
// cannot be decoded by the [Read] function.
//
// In files written by the compiler, the export data is not at the
// start of the file. Before calling Read, use [NewReader] to locate
// the desired portion of the file.
//
// The [Write] function in this package encodes the exported API of a
// Go package ([types.Package]) as a file. Such files can be later
// decoded by Read, but cannot be consumed by the compiler.
//
// # Future changes
//
// Although Read supports the formats written by both Write and the
// compiler, the two are quite different, and there is an open
// proposal (https://go.dev/issue/69491) to separate these APIs.
//
// Under that proposal, this package would ultimately provide only the
// Read operation for compiler export data, which must be defined in
// this module (golang.org/x/tools), not in the standard library, to
// avoid version skew for developer tools that need to read compiler
// export data both before and after a Go release, such as from Go
// 1.23 to Go 1.24. Because this package lives in the tools module,
// clients can update their version of the module some time before the
// Go 1.24 release and rebuild and redeploy their tools, which will
// then be able to consume both Go 1.23 and Go 1.24 export data files,
// so they will work before and after the Go update. (See discussion
// at https://go.dev/issue/15651.)
//
// The operations to import and export [go/types] data structures
// would be defined in the go/types package as Import and Export.
// [Write] would (eventually) delegate to Export,
// and [Read], when it detects a file produced by Export,
// would delegate to Import.
//
// # Deprecations
//
// The [NewImporter] and [Find] functions are deprecated and should
// not be used in new code. The [WriteBundle] and [ReadBundle]
// functions are experimental, and there is an open proposal to
// deprecate them (https://go.dev/issue/69573).
package gcexportdata
import (
"bufio"
@@ -64,24 +106,18 @@ func Find(importPath, srcDir string) (filename, path string) {
// additional trailing data beyond the end of the export data.
func NewReader(r io.Reader) (io.Reader, error) {
buf := bufio.NewReader(r)
_, size, err := gcimporter.FindExportData(buf)
size, err := gcimporter.FindExportData(buf)
if err != nil {
return nil, err
}
if size >= 0 {
// We were given an archive and found the __.PKGDEF in it.
// This tells us the size of the export data, and we don't
// need to return the entire file.
return &io.LimitedReader{
R: buf,
N: size,
}, nil
} else {
// We were given an object file. As such, we don't know how large
// the export data is and must return the entire file.
return buf, nil
}
// We were given an archive and found the __.PKGDEF in it.
// This tells us the size of the export data, and we don't
// need to return the entire file.
return &io.LimitedReader{
R: buf,
N: size,
}, nil
}
// readAll works the same way as io.ReadAll, but avoids allocations and copies
@@ -100,6 +136,11 @@ func readAll(r io.Reader) ([]byte, error) {
// Read reads export data from in, decodes it, and returns type
// information for the package.
//
// Read is capable of reading export data produced by [Write] at the
// same source code version, or by the last two Go releases (plus tip)
// of the standard Go compiler. Reading files from older compilers may
// produce an error.
//
// The package path (effectively its linker symbol prefix) is
// specified by path, since unlike the package name, this information
// may not be recorded in the export data.
@@ -128,14 +169,26 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
// (from "version"). Select appropriate importer.
if len(data) > 0 {
switch data[0] {
case 'v', 'c', 'd': // binary, till go1.10
case 'v', 'c', 'd':
// binary, produced by cmd/compile till go1.10
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
case 'i': // indexed, till go1.19
case 'i':
// indexed, produced by cmd/compile till go1.19,
// and also by [Write].
//
// If proposal #69491 is accepted, go/types
// serialization will be implemented by
// types.Export, to which Write would eventually
// delegate (explicitly dropping any pretence at
// inter-version Write-Read compatibility).
// This [Read] function would delegate to types.Import
// when it detects that the file was produced by Export.
_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
return pkg, err
case 'u': // unified, from go1.20
case 'u':
// unified, produced by cmd/compile since go1.20
_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
return pkg, err
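
The rewritten package comment and the simplified NewReader above describe the intended call sequence: locate the export data with NewReader, then decode it with Read. A hedged usage sketch; the archive filename below is hypothetical and would normally come from 'go list -export' output or packages.Load with NeedExportFile:

```go
// Hedged usage sketch of the gcexportdata call sequence described above.
// The archive path is hypothetical.
package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	f, err := os.Open("fmt.a") // hypothetical export/archive file written by cmd/compile
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader locates the export data section within the archive...
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}

	// ...and Read decodes it into go/types data structures.
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), len(pkg.Scope().Names()), "exported names")
}
```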


@@ -13,6 +13,7 @@ import (
"fmt"
"os"
"os/exec"
"slices"
"strings"
)
@@ -79,7 +80,7 @@ type DriverResponse struct {
// driver is the type for functions that query the build system for the
// packages named by the patterns.
type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)
type driver func(cfg *Config, patterns []string) (*DriverResponse, error)
// findExternalDriver returns the file path of a tool that supplies
// the build system package structure, or "" if not found.
@@ -103,7 +104,7 @@ func findExternalDriver(cfg *Config) driver {
return nil
}
}
return func(cfg *Config, words ...string) (*DriverResponse, error) {
return func(cfg *Config, patterns []string) (*DriverResponse, error) {
req, err := json.Marshal(DriverRequest{
Mode: cfg.Mode,
Env: cfg.Env,
@@ -117,7 +118,7 @@ func findExternalDriver(cfg *Config) driver {
buf := new(bytes.Buffer)
stderr := new(bytes.Buffer)
cmd := exec.CommandContext(cfg.Context, tool, words...)
cmd := exec.CommandContext(cfg.Context, tool, patterns...)
cmd.Dir = cfg.Dir
// The cwd gets resolved to the real path. On Darwin, where
// /tmp is a symlink, this breaks anything that expects the
@@ -131,7 +132,7 @@ func findExternalDriver(cfg *Config) driver {
// command.
//
// (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go)
cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir)
cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir)
cmd.Stdin = bytes.NewReader(req)
cmd.Stdout = buf
cmd.Stderr = stderr
@@ -150,7 +151,3 @@ func findExternalDriver(cfg *Config) driver {
return &response, nil
}
}
// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)].
// TODO(adonovan): use go1.21 slices.Clip.
func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] }
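
The hunk above replaces the local slicesClip helper (removed here) with the standard library's slices.Clip, available since Go 1.21. Clipping matters because the subsequent append of "PWD="+cfg.Dir must not spill into spare capacity shared with the caller's cfg.Env. A minimal standalone sketch:

```go
// Minimal standalone sketch of why the driver clips cfg.Env before appending:
// slices.Clip drops spare capacity so the append below allocates a new
// backing array instead of writing into memory shared with the caller.
package main

import (
	"fmt"
	"slices"
)

func main() {
	env := make([]string, 2, 8) // caller-owned slice with spare capacity
	env[0], env[1] = "A=1", "B=2"

	cmdEnv := append(slices.Clip(env), "PWD=/tmp")

	fmt.Println(env)    // [A=1 B=2]           -- untouched
	fmt.Println(cmdEnv) // [A=1 B=2 PWD=/tmp]  -- independent copy
}
```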


@@ -80,6 +80,12 @@ type golistState struct {
cfg *Config
ctx context.Context
runner *gocommand.Runner
// overlay is the JSON file that encodes the Config.Overlay
// mapping, used by 'go list -overlay=...'.
overlay string
envOnce sync.Once
goEnvError error
goEnv map[string]string
@@ -127,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string {
// goListDriver uses the go list command to interpret the patterns and produce
// the build system package structure.
// See driver for more details.
func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) {
//
// overlay is the JSON file that encodes the cfg.Overlay
// mapping, used by 'go list -overlay=...'
func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) {
// Make sure that any asynchronous go commands are killed when we return.
parentCtx := cfg.Context
if parentCtx == nil {
@@ -142,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error
cfg: cfg,
ctx: ctx,
vendorDirs: map[string]bool{},
overlay: overlay,
runner: runner,
}
// Fill in response.Sizes asynchronously if necessary.
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
errCh := make(chan error)
go func() {
compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), cfg.gocmdRunner)
compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner)
response.dr.Compiler = compiler
response.dr.Arch = arch
errCh <- err
@@ -494,13 +505,14 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse
pkg := &Package{
Name: p.Name,
ID: p.ImportPath,
Dir: p.Dir,
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
EmbedFiles: absJoin(p.Dir, p.EmbedFiles),
EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns),
IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles),
forTest: p.ForTest,
ForTest: p.ForTest,
depsErrors: p.DepsErrors,
Module: p.Module,
}
@@ -681,7 +693,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
// getGoVersion returns the effective minor version of the go command.
func (state *golistState) getGoVersion() (int, error) {
state.goVersionOnce.Do(func() {
state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner)
})
return state.goVersion, state.goVersionError
}
@@ -751,7 +763,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
}
}
addFields("Name", "ImportPath", "Error") // These fields are always needed
if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 {
if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles",
"CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles",
"SwigFiles", "SwigCXXFiles", "SysoFiles")
@@ -759,7 +771,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
addFields("TestGoFiles", "XTestGoFiles")
}
}
if cfg.Mode&NeedTypes != 0 {
if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
// CompiledGoFiles seems to be required for the test case TestCgoNoSyntax,
// even when -compiled isn't passed in.
// TODO(#52435): Should we make the test ask for -compiled, or automatically
@@ -784,7 +796,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
// Request Dir in the unlikely case Export is not absolute.
addFields("Dir", "Export")
}
if cfg.Mode&needInternalForTest != 0 {
if cfg.Mode&NeedForTest != 0 {
addFields("ForTest")
}
if cfg.Mode&needInternalDepsErrors != 0 {
@@ -840,7 +852,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation {
Env: cfg.Env,
Logf: cfg.Logf,
WorkingDir: cfg.Dir,
Overlay: cfg.goListOverlayFile,
Overlay: state.overlay,
}
}
@@ -851,11 +863,8 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
inv := state.cfgInvocation()
inv.Verb = verb
inv.Args = args
gocmdRunner := cfg.gocmdRunner
if gocmdRunner == nil {
gocmdRunner = &gocommand.Runner{}
}
stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv)
stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv)
if err != nil {
// Check for 'go' executable not being found.
if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
@@ -879,6 +888,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
return nil, friendlyErr
}
// Return an error if 'go list' failed due to missing tools in
// $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606).
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) {
return nil, friendlyErr
}
// Is there an error running the C compiler in cgo? This will be reported in the "Error" field
// and should be suppressed by go list -e.
//


@@ -23,6 +23,7 @@ var modes = [...]struct {
{NeedSyntax, "NeedSyntax"},
{NeedTypesInfo, "NeedTypesInfo"},
{NeedTypesSizes, "NeedTypesSizes"},
{NeedForTest, "NeedForTest"},
{NeedModule, "NeedModule"},
{NeedEmbedFiles, "NeedEmbedFiles"},
{NeedEmbedPatterns, "NeedEmbedPatterns"},


@@ -16,13 +16,13 @@ import (
"go/scanner"
"go/token"
"go/types"
"io"
"log"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"golang.org/x/sync/errgroup"
@@ -31,7 +31,6 @@ import (
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/packagesinternal"
"golang.org/x/tools/internal/typesinternal"
"golang.org/x/tools/internal/versions"
)
// A LoadMode controls the amount of detail to return when loading.
@@ -44,6 +43,20 @@ import (
// ID and Errors (if present) will always be filled.
// [Load] may return more information than requested.
//
// The Mode flag is a union of several bits named NeedName,
// NeedFiles, and so on, each of which determines whether
// a given field of Package (Name, Files, etc) should be
// populated.
//
// For convenience, we provide named constants for the most
// common combinations of Need flags:
//
// [LoadFiles] lists of files in each package
// [LoadImports] ... plus imports
// [LoadTypes] ... plus type information
// [LoadSyntax] ... plus type-annotated syntax
// [LoadAllSyntax] ... for all dependencies
//
// Unfortunately there are a number of open bugs related to
// interactions among the LoadMode bits:
// - https://github.com/golang/go/issues/56633
@@ -56,7 +69,7 @@ const (
// NeedName adds Name and PkgPath.
NeedName LoadMode = 1 << iota
// NeedFiles adds GoFiles and OtherFiles.
// NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles
NeedFiles
// NeedCompiledGoFiles adds CompiledGoFiles.
@@ -78,7 +91,7 @@ const (
// NeedSyntax adds Syntax and Fset.
NeedSyntax
// NeedTypesInfo adds TypesInfo.
// NeedTypesInfo adds TypesInfo and Fset.
NeedTypesInfo
// NeedTypesSizes adds TypesSizes.
@@ -87,9 +100,10 @@ const (
// needInternalDepsErrors adds the internal deps errors field for use by gopls.
needInternalDepsErrors
// needInternalForTest adds the internal forTest field.
// NeedForTest adds ForTest.
//
// Tests must also be set on the context for this field to be populated.
needInternalForTest
NeedForTest
// typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
// Modifies CompiledGoFiles and Types, and has no effect on its own.
@@ -109,33 +123,18 @@ const (
const (
// LoadFiles loads the name and file names for the initial packages.
//
// Deprecated: LoadFiles exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
// LoadImports loads the name, file names, and import mapping for the initial packages.
//
// Deprecated: LoadImports exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
LoadImports = LoadFiles | NeedImports
// LoadTypes loads exported type information for the initial packages.
//
// Deprecated: LoadTypes exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
// LoadSyntax loads typed syntax for the initial packages.
//
// Deprecated: LoadSyntax exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
// LoadAllSyntax loads typed syntax for the initial packages and all dependencies.
//
// Deprecated: LoadAllSyntax exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
LoadAllSyntax = LoadSyntax | NeedDeps
// Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
@@ -145,13 +144,7 @@ const (
// A Config specifies details about how packages should be loaded.
// The zero value is a valid configuration.
//
// Calls to Load do not modify this struct.
//
// TODO(adonovan): #67702: this is currently false: in fact,
// calls to [Load] do not modify the public fields of this struct, but
// may modify hidden fields, so concurrent calls to [Load] must not
// use the same Config. But perhaps we should reestablish the
// documented invariant.
// Calls to [Load] do not modify this struct.
type Config struct {
// Mode controls the level of information returned for each package.
Mode LoadMode
@@ -182,19 +175,10 @@ type Config struct {
//
Env []string
// gocmdRunner guards go command calls from concurrency errors.
gocmdRunner *gocommand.Runner
// BuildFlags is a list of command-line flags to be passed through to
// the build system's query tool.
BuildFlags []string
// modFile will be used for -modfile in go command invocations.
modFile string
// modFlag will be used for -modfile in go command invocations.
modFlag string
// Fset provides source position information for syntax trees and types.
// If Fset is nil, Load will use a new fileset, but preserve Fset's value.
Fset *token.FileSet
@@ -241,9 +225,13 @@ type Config struct {
// drivers may vary in their level of support for overlays.
Overlay map[string][]byte
// goListOverlayFile is the JSON file that encodes the Overlay
// mapping, used by 'go list -overlay=...'
goListOverlayFile string
// -- Hidden configuration fields only for use in x/tools --
// modFile will be used for -modfile in go command invocations.
modFile string
// modFlag will be used for -modfile in go command invocations.
modFlag string
}
// Load loads and returns the Go packages named by the given patterns.
@@ -334,21 +322,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro
} else if !response.NotHandled {
return response, true, nil
}
// (fall through)
// not handled: fall through
}
// go list fallback
//
// Write overlays once, as there are many calls
// to 'go list' (one per chunk plus others too).
overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
if err != nil {
return nil, false, err
}
defer cleanupOverlay()
cfg.goListOverlayFile = overlay
response, err := callDriverOnChunks(goListDriver, cfg, chunks)
var runner gocommand.Runner // (shared across many 'go list' calls)
driver := func(cfg *Config, patterns []string) (*DriverResponse, error) {
return goListDriver(cfg, &runner, overlayFile, patterns)
}
response, err := callDriverOnChunks(driver, cfg, chunks)
if err != nil {
return nil, false, err
}
@@ -386,16 +377,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) {
func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) {
if len(chunks) == 0 {
return driver(cfg)
return driver(cfg, nil)
}
responses := make([]*DriverResponse, len(chunks))
errNotHandled := errors.New("driver returned NotHandled")
var g errgroup.Group
for i, chunk := range chunks {
i := i
chunk := chunk
g.Go(func() (err error) {
responses[i], err = driver(cfg, chunk...)
responses[i], err = driver(cfg, chunk)
if responses[i] != nil && responses[i].NotHandled {
err = errNotHandled
}
@@ -445,6 +434,12 @@ type Package struct {
// PkgPath is the package path as used by the go/types package.
PkgPath string
// Dir is the directory associated with the package, if it exists.
//
// For packages listed by the go command, this is the directory containing
// the package files.
Dir string
// Errors contains any errors encountered querying the metadata
// of the package, or while parsing or type-checking its files.
Errors []Error
@@ -532,8 +527,8 @@ type Package struct {
// -- internal --
// forTest is the package under test, if any.
forTest string
// ForTest is the package under test, if any.
ForTest string
// depsErrors is the DepsErrors field from the go list response, if any.
depsErrors []*packagesinternal.PackageError
@@ -562,9 +557,6 @@ type ModuleError struct {
}
func init() {
packagesinternal.GetForTest = func(p interface{}) string {
return p.(*Package).forTest
}
packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError {
return p.(*Package).depsErrors
}
@@ -576,7 +568,6 @@ func init() {
}
packagesinternal.TypecheckCgo = int(typecheckCgo)
packagesinternal.DepsErrors = int(needInternalDepsErrors)
packagesinternal.ForTest = int(needInternalForTest)
}
// An Error describes a problem with a package's metadata, syntax, or types.
@@ -692,18 +683,19 @@ func (p *Package) String() string { return p.ID }
// loaderPackage augments Package with state used during the loading phase
type loaderPackage struct {
*Package
importErrors map[string]error // maps each bad import to its error
loadOnce sync.Once
color uint8 // for cycle detection
needsrc bool // load from source (Mode >= LoadTypes)
needtypes bool // type information is either requested or depended on
initial bool // package was matched by a pattern
goVersion int // minor version number of go command on PATH
importErrors map[string]error // maps each bad import to its error
preds []*loaderPackage // packages that import this one
unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded
color uint8 // for cycle detection
needsrc bool // load from source (Mode >= LoadTypes)
needtypes bool // type information is either requested or depended on
initial bool // package was matched by a pattern
goVersion int // minor version number of go command on PATH
}
// loader holds the working state of a single call to load.
type loader struct {
pkgs map[string]*loaderPackage
pkgs map[string]*loaderPackage // keyed by Package.ID
Config
sizes types.Sizes // non-nil if needed by mode
parseCache map[string]*parseValue
@@ -749,9 +741,6 @@ func newLoader(cfg *Config) *loader {
if ld.Config.Env == nil {
ld.Config.Env = os.Environ()
}
if ld.Config.gocmdRunner == nil {
ld.Config.gocmdRunner = &gocommand.Runner{}
}
if ld.Context == nil {
ld.Context = context.Background()
}
@@ -765,7 +754,7 @@ func newLoader(cfg *Config) *loader {
ld.requestedMode = ld.Mode
ld.Mode = impliedLoadMode(ld.Mode)
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
if ld.Fset == nil {
ld.Fset = token.NewFileSet()
}
@@ -806,7 +795,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
// This package needs type information if the caller requested types and the package is
// either a root, or it's a non-root and the user requested dependencies ...
needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
// This package needs source if the call requested source (or types info, which implies source)
// and the package is either a root, or it's a non-root and the user requested dependencies...
needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
@@ -831,9 +820,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
}
}
if ld.Mode&NeedImports != 0 {
// Materialize the import graph.
// Materialize the import graph if it is needed (NeedImports),
// or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}).
var leaves []*loaderPackage // packages with no unfinished successors
if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
const (
white = 0 // new
grey = 1 // in progress
@@ -852,63 +842,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
// dependency on a package that does. These are the only packages
// for which we load source code.
var stack []*loaderPackage
var visit func(lpkg *loaderPackage) bool
visit = func(lpkg *loaderPackage) bool {
switch lpkg.color {
case black:
return lpkg.needsrc
case grey:
var visit func(from, lpkg *loaderPackage) bool
visit = func(from, lpkg *loaderPackage) bool {
if lpkg.color == grey {
panic("internal error: grey node")
}
lpkg.color = grey
stack = append(stack, lpkg) // push
stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
lpkg.Imports = make(map[string]*Package, len(stubs))
for importPath, ipkg := range stubs {
var importErr error
imp := ld.pkgs[ipkg.ID]
if imp == nil {
// (includes package "C" when DisableCgo)
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
} else if imp.color == grey {
importErr = fmt.Errorf("import cycle: %s", stack)
}
if importErr != nil {
if lpkg.importErrors == nil {
lpkg.importErrors = make(map[string]error)
if lpkg.color == white {
lpkg.color = grey
stack = append(stack, lpkg) // push
stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
lpkg.Imports = make(map[string]*Package, len(stubs))
for importPath, ipkg := range stubs {
var importErr error
imp := ld.pkgs[ipkg.ID]
if imp == nil {
// (includes package "C" when DisableCgo)
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
} else if imp.color == grey {
importErr = fmt.Errorf("import cycle: %s", stack)
}
lpkg.importErrors[importPath] = importErr
continue
if importErr != nil {
if lpkg.importErrors == nil {
lpkg.importErrors = make(map[string]error)
}
lpkg.importErrors[importPath] = importErr
continue
}
if visit(lpkg, imp) {
lpkg.needsrc = true
}
lpkg.Imports[importPath] = imp.Package
}
if visit(imp) {
lpkg.needsrc = true
// -- postorder --
// Complete type information is required for the
// immediate dependencies of each source package.
if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
for _, ipkg := range lpkg.Imports {
ld.pkgs[ipkg.ID].needtypes = true
}
}
lpkg.Imports[importPath] = imp.Package
// NeedTypeSizes causes TypeSizes to be set even
// on packages for which types aren't needed.
if ld.Mode&NeedTypesSizes != 0 {
lpkg.TypesSizes = ld.sizes
}
// Add packages with no imports directly to the queue of leaves.
if len(lpkg.Imports) == 0 {
leaves = append(leaves, lpkg)
}
stack = stack[:len(stack)-1] // pop
lpkg.color = black
}
// Complete type information is required for the
// immediate dependencies of each source package.
if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
for _, ipkg := range lpkg.Imports {
ld.pkgs[ipkg.ID].needtypes = true
}
// Add edge from predecessor.
if from != nil {
from.unfinishedSuccs.Add(+1) // incref
lpkg.preds = append(lpkg.preds, from)
}
// NeedTypeSizes causes TypeSizes to be set even
// on packages for which types aren't needed.
if ld.Mode&NeedTypesSizes != 0 {
lpkg.TypesSizes = ld.sizes
}
stack = stack[:len(stack)-1] // pop
lpkg.color = black
return lpkg.needsrc
}
// For each initial package, create its import DAG.
for _, lpkg := range initial {
visit(lpkg)
visit(nil, lpkg)
}
} else {
@@ -921,16 +924,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
// Load type data and syntax if needed, starting at
// the initial packages (roots of the import DAG).
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
var wg sync.WaitGroup
for _, lpkg := range initial {
wg.Add(1)
go func(lpkg *loaderPackage) {
ld.loadRecursive(lpkg)
wg.Done()
}(lpkg)
if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
// We avoid using g.SetLimit to limit concurrency as
// it makes g.Go stop accepting work, which prevents
// workers from enqueuing, and thus finishing, and thus
// allowing the group to make progress: deadlock.
//
// Instead we use the ioLimit and cpuLimit semaphores.
g, _ := errgroup.WithContext(ld.Context)
// enqueue adds a package to the type-checking queue.
// It must have no unfinished successors.
var enqueue func(*loaderPackage)
enqueue = func(lpkg *loaderPackage) {
g.Go(func() error {
// Parse and type-check.
ld.loadPackage(lpkg)
// Notify each waiting predecessor,
// and enqueue it when it becomes a leaf.
for _, pred := range lpkg.preds {
if pred.unfinishedSuccs.Add(-1) == 0 { // decref
enqueue(pred)
}
}
return nil
})
}
// Load leaves first, adding new packages
// to the queue as they become leaves.
for _, leaf := range leaves {
enqueue(leaf)
}
if err := g.Wait(); err != nil {
return nil, err // cancelled
}
wg.Wait()
}
// If the context is done, return its error and
@@ -977,7 +1009,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
if ld.requestedMode&NeedSyntax == 0 {
ld.pkgs[i].Syntax = nil
}
if ld.requestedMode&NeedTypes == 0 && ld.requestedMode&NeedSyntax == 0 {
if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 {
ld.pkgs[i].Fset = nil
}
if ld.requestedMode&NeedTypesInfo == 0 {
@@ -994,31 +1026,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
return result, nil
}
// loadRecursive loads the specified package and its dependencies,
// recursively, in parallel, in topological order.
// It is atomic and idempotent.
// Precondition: ld.Mode&NeedTypes.
func (ld *loader) loadRecursive(lpkg *loaderPackage) {
lpkg.loadOnce.Do(func() {
// Load the direct dependencies, in parallel.
var wg sync.WaitGroup
for _, ipkg := range lpkg.Imports {
imp := ld.pkgs[ipkg.ID]
wg.Add(1)
go func(imp *loaderPackage) {
ld.loadRecursive(imp)
wg.Done()
}(imp)
}
wg.Wait()
ld.loadPackage(lpkg)
})
}
// loadPackage loads the specified package.
// loadPackage loads/parses/typechecks the specified package.
// It must be called only once per Package,
// after immediate dependencies are loaded.
// Precondition: ld.Mode & NeedTypes.
// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0.
func (ld *loader) loadPackage(lpkg *loaderPackage) {
if lpkg.PkgPath == "unsafe" {
// Fill in the blanks to avoid surprises.
@@ -1054,6 +1065,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
if !lpkg.needtypes && !lpkg.needsrc {
return
}
// TODO(adonovan): this condition looks wrong:
// I think it should be lpkg.needtypes && !lpkg.needsrc,
// so that NeedSyntax without NeedTypes can be satisfied by export data.
if !lpkg.needsrc {
if err := ld.loadFromExportData(lpkg); err != nil {
lpkg.Errors = append(lpkg.Errors, Error{
@@ -1159,7 +1174,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
}
lpkg.Syntax = files
if ld.Config.Mode&NeedTypes == 0 {
if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 {
return
}
@@ -1170,16 +1185,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
return
}
lpkg.TypesInfo = &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
Uses: make(map[*ast.Ident]types.Object),
Implicits: make(map[ast.Node]types.Object),
Instances: make(map[*ast.Ident]types.Instance),
Scopes: make(map[ast.Node]*types.Scope),
Selections: make(map[*ast.SelectorExpr]*types.Selection),
// Populate TypesInfo only if needed, as it
// causes the type checker to work much harder.
if ld.Config.Mode&NeedTypesInfo != 0 {
lpkg.TypesInfo = &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
Uses: make(map[*ast.Ident]types.Object),
Implicits: make(map[ast.Node]types.Object),
Instances: make(map[*ast.Ident]types.Instance),
Scopes: make(map[ast.Node]*types.Scope),
Selections: make(map[*ast.SelectorExpr]*types.Selection),
FileVersions: make(map[*ast.File]string),
}
}
versions.InitFileVersions(lpkg.TypesInfo)
lpkg.TypesSizes = ld.sizes
importer := importerFunc(func(path string) (*types.Package, error) {
@@ -1232,6 +1251,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
}
}
// Type-checking is CPU intensive.
cpuLimit <- unit{} // acquire a token
defer func() { <-cpuLimit }() // release a token
typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
lpkg.importErrors = nil // no longer needed
@@ -1296,8 +1319,11 @@ type importerFunc func(path string) (*types.Package, error)
func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
// We use a counting semaphore to limit
// the number of parallel I/O calls per process.
var ioLimit = make(chan bool, 20)
// the number of parallel I/O calls or CPU threads per process.
var (
ioLimit = make(chan unit, 20)
cpuLimit = make(chan unit, runtime.GOMAXPROCS(0))
)
func (ld *loader) parseFile(filename string) (*ast.File, error) {
ld.parseCacheMu.Lock()
@@ -1314,20 +1340,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) {
var src []byte
for f, contents := range ld.Config.Overlay {
// TODO(adonovan): Inefficient for large overlays.
// Do an exact name-based map lookup
// (for nonexistent files) followed by a
// FileID-based map lookup (for existing ones).
if sameFile(f, filename) {
src = contents
break
}
}
var err error
if src == nil {
ioLimit <- true // wait
ioLimit <- unit{} // acquire a token
src, err = os.ReadFile(filename)
<-ioLimit // signal
<-ioLimit // release a token
}
if err != nil {
v.err = err
} else {
// Parsing is CPU intensive.
cpuLimit <- unit{} // acquire a token
v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
<-cpuLimit // release a token
}
close(v.ready)
@@ -1342,18 +1376,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) {
// Because files are scanned in parallel, the token.Pos
// positions of the resulting ast.Files are not ordered.
func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
var wg sync.WaitGroup
n := len(filenames)
parsed := make([]*ast.File, n)
errors := make([]error, n)
for i, file := range filenames {
wg.Add(1)
go func(i int, filename string) {
var (
n = len(filenames)
parsed = make([]*ast.File, n)
errors = make([]error, n)
)
var g errgroup.Group
for i, filename := range filenames {
// This creates goroutines unnecessarily in the
// cache-hit case, but that case is uncommon.
g.Go(func() error {
parsed[i], errors[i] = ld.parseFile(filename)
wg.Done()
}(i, file)
return nil
})
}
wg.Wait()
g.Wait()
// Eliminate nils, preserving order.
var o int
@@ -1524,4 +1561,4 @@ func usesExportData(cfg *Config) bool {
return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
}
var _ interface{} = io.Discard // assert build toolchain is go1.16 or later
type unit struct{}
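
The reworked LoadMode documentation earlier in this file deprecates the LoadFiles/LoadImports/LoadTypes/LoadSyntax/LoadAllSyntax shorthands in favour of spelling out Need flags. A hedged sketch of a caller doing exactly that; the "./..." pattern and the exact flag combination are illustrative:

```go
// Hedged sketch of a caller spelling out Need flags, as the revised LoadMode
// documentation recommends instead of the deprecated Load* shorthands.
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// Request only what is used below: package names, file lists,
		// parsed syntax, and type information (which also populates Fset).
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedSyntax |
			packages.NeedTypes | packages.NeedTypesInfo,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	for _, pkg := range pkgs {
		fmt.Printf("%s: %d Go files, %d parsed files\n",
			pkg.PkgPath, len(pkg.GoFiles), len(pkg.Syntax))
	}
}
```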


@@ -281,25 +281,25 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
T := o.Type()
if alias, ok := T.(*types.Alias); ok {
if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam, nil); r != nil {
if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil {
return Path(r), nil
}
if r := find(obj, aliases.Rhs(alias), append(path, opRhs), nil); r != nil {
if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil {
return Path(r), nil
}
} else if tname.IsAlias() {
// legacy alias
if r := find(obj, T, path, nil); r != nil {
if r := find(obj, T, path); r != nil {
return Path(r), nil
}
} else if named, ok := T.(*types.Named); ok {
// defined (named) type
if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil {
if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil {
return Path(r), nil
}
if r := find(obj, named.Underlying(), append(path, opUnderlying), nil); r != nil {
if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil {
return Path(r), nil
}
}
@@ -312,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
if _, ok := o.(*types.TypeName); !ok {
if o.Exported() {
// exported non-type (const, var, func)
if r := find(obj, o.Type(), append(path, opType), nil); r != nil {
if r := find(obj, o.Type(), append(path, opType)); r != nil {
return Path(r), nil
}
}
@@ -332,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
if m == obj {
return Path(path2), nil // found declared method
}
if r := find(obj, m.Type(), append(path2, opType), nil); r != nil {
if r := find(obj, m.Type(), append(path2, opType)); r != nil {
return Path(r), nil
}
}
@@ -447,46 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
//
// The seen map is used to short circuit cycles through type parameters. If
// nil, it will be allocated as necessary.
func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
//
// The seenMethods map is used internally to short circuit cycles through
// interface methods, such as occur in the following example:
//
// type I interface { f() interface{I} }
//
// See golang/go#68046 for details.
func find(obj types.Object, T types.Type, path []byte) []byte {
return (&finder{obj: obj}).find(T, path)
}
// finder closes over search state for a call to find.
type finder struct {
obj types.Object // the sought object
seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters
seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces
}
func (f *finder) find(T types.Type, path []byte) []byte {
switch T := T.(type) {
case *types.Alias:
return find(obj, types.Unalias(T), path, seen)
return f.find(types.Unalias(T), path)
case *types.Basic, *types.Named:
// Named types belonging to pkg were handled already,
// so T must belong to another package. No path.
return nil
case *types.Pointer:
return find(obj, T.Elem(), append(path, opElem), seen)
return f.find(T.Elem(), append(path, opElem))
case *types.Slice:
return find(obj, T.Elem(), append(path, opElem), seen)
return f.find(T.Elem(), append(path, opElem))
case *types.Array:
return find(obj, T.Elem(), append(path, opElem), seen)
return f.find(T.Elem(), append(path, opElem))
case *types.Chan:
return find(obj, T.Elem(), append(path, opElem), seen)
return f.find(T.Elem(), append(path, opElem))
case *types.Map:
if r := find(obj, T.Key(), append(path, opKey), seen); r != nil {
if r := f.find(T.Key(), append(path, opKey)); r != nil {
return r
}
return find(obj, T.Elem(), append(path, opElem), seen)
return f.find(T.Elem(), append(path, opElem))
case *types.Signature:
if r := findTypeParam(obj, T.RecvTypeParams(), path, opRecvTypeParam, nil); r != nil {
if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil {
return r
}
if r := findTypeParam(obj, T.TypeParams(), path, opTypeParam, seen); r != nil {
if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil {
return r
}
if r := find(obj, T.Params(), append(path, opParams), seen); r != nil {
if r := f.find(T.Params(), append(path, opParams)); r != nil {
return r
}
return find(obj, T.Results(), append(path, opResults), seen)
return f.find(T.Results(), append(path, opResults))
case *types.Struct:
for i := 0; i < T.NumFields(); i++ {
fld := T.Field(i)
path2 := appendOpArg(path, opField, i)
if fld == obj {
if fld == f.obj {
return path2 // found field var
}
if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil {
if r := f.find(fld.Type(), append(path2, opType)); r != nil {
return r
}
}
@@ -495,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
for i := 0; i < T.Len(); i++ {
v := T.At(i)
path2 := appendOpArg(path, opAt, i)
if v == obj {
if v == f.obj {
return path2 // found param/result var
}
if r := find(obj, v.Type(), append(path2, opType), seen); r != nil {
if r := f.find(v.Type(), append(path2, opType)); r != nil {
return r
}
}
@@ -506,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
case *types.Interface:
for i := 0; i < T.NumMethods(); i++ {
m := T.Method(i)
if f.seenMethods[m] {
return nil
}
path2 := appendOpArg(path, opMethod, i)
if m == obj {
if m == f.obj {
return path2 // found interface method
}
if r := find(obj, m.Type(), append(path2, opType), seen); r != nil {
if f.seenMethods == nil {
f.seenMethods = make(map[*types.Func]bool)
}
f.seenMethods[m] = true
if r := f.find(m.Type(), append(path2, opType)); r != nil {
return r
}
}
return nil
case *types.TypeParam:
name := T.Obj()
if name == obj {
return append(path, opObj)
}
if seen[name] {
if f.seenTParamNames[name] {
return nil
}
if seen == nil {
seen = make(map[*types.TypeName]bool)
if name == f.obj {
return append(path, opObj)
}
seen[name] = true
if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil {
if f.seenTParamNames == nil {
f.seenTParamNames = make(map[*types.TypeName]bool)
}
f.seenTParamNames[name] = true
if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil {
return r
}
return nil
@@ -535,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]
panic(T)
}
func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte, seen map[*types.TypeName]bool) []byte {
func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte {
return (&finder{obj: obj}).findTypeParam(list, path, op)
}
func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte {
for i := 0; i < list.Len(); i++ {
tparam := list.At(i)
path2 := appendOpArg(path, op, i)
if r := find(obj, tparam, path2, seen); r != nil {
if r := f.find(tparam, path2); r != nil {
return r
}
}

View File

@@ -39,12 +39,15 @@ func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
}
// FindExportData positions the reader r at the beginning of the
// export data section of an underlying GC-created object/archive
// export data section of an underlying cmd/compile created archive
// file by reading from it. The reader must be positioned at the
// start of the file before calling this function. The hdr result
// is the string before the export data, either "$$" or "$$B".
// The size result is the length of the export data in bytes, or -1 if not known.
func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
// start of the file before calling this function.
// The size result is the length of the export data in bytes.
//
// This function is needed by [gcexportdata.Read], which must
// accept inputs produced by the last two releases of cmd/compile,
// plus tip.
func FindExportData(r *bufio.Reader) (size int64, err error) {
// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {
@@ -52,28 +55,33 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
return
}
if string(line) == "!<arch>\n" {
// Archive file. Scan to __.PKGDEF.
var name string
if name, size, err = readGopackHeader(r); err != nil {
return
}
// First entry should be __.PKGDEF.
if name != "__.PKGDEF" {
err = fmt.Errorf("go archive is missing __.PKGDEF")
return
}
// Read first line of __.PKGDEF data, so that line
// is once again the first line of the input.
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
size -= int64(len(line))
// Is the first line an archive file signature?
if string(line) != "!<arch>\n" {
err = fmt.Errorf("not the start of an archive file (%q)", line)
return
}
// Archive file. Scan to __.PKGDEF.
var name string
if name, size, err = readGopackHeader(r); err != nil {
return
}
arsize := size
// First entry should be __.PKGDEF.
if name != "__.PKGDEF" {
err = fmt.Errorf("go archive is missing __.PKGDEF")
return
}
// Read first line of __.PKGDEF data, so that line
// is once again the first line of the input.
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
size -= int64(len(line))
// Now at __.PKGDEF in the archive; line should begin with "go object ".
if !strings.HasPrefix(string(line), "go object ") {
@@ -81,8 +89,8 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
return
}
// Skip over object header to export data.
// Begins after first line starting with $$.
// Skip over object headers to get to the export data section header "$$B\n".
// Object headers are lines that do not start with '$'.
for line[0] != '$' {
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
@@ -90,9 +98,18 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
}
size -= int64(len(line))
}
hdr = string(line)
// Check for the binary export data section header "$$B\n".
hdr := string(line)
if hdr != "$$B\n" {
err = fmt.Errorf("unknown export data header: %q", hdr)
return
}
// TODO(taking): Remove end-of-section marker "\n$$\n" from size.
if size < 0 {
size = -1
err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
return
}
return

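As an illustrative sketch (not part of this change) of how the narrowed FindExportData might be called from within the gcimporter package: the archive path and the helper name below are assumptions.

package gcimporter

import (
	"bufio"
	"fmt"
	"os"
)

// exampleFindExportData is a hypothetical helper: it opens a package archive
// produced by cmd/compile, positions a reader at the start of the binary
// export data, and reports the export data size.
func exampleFindExportData(archive string) error {
	f, err := os.Open(archive)
	if err != nil {
		return err
	}
	defer f.Close()

	r := bufio.NewReader(f)
	size, err := FindExportData(r) // the "$$B\n" section header has been consumed on return
	if err != nil {
		return err
	}
	fmt.Printf("%s: %d bytes of export data\n", archive, size)
	return nil
}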
View File

@@ -161,6 +161,8 @@ func FindPkg(path, srcDir string) (filename, id string) {
// Import imports a gc-generated package given its import path and srcDir, adds
// the corresponding package object to the packages map, and returns the object.
// The packages map must contain all packages already imported.
//
// TODO(taking): Import is only used in tests. Move to gcimporter_test.
func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
var rc io.ReadCloser
var filename, id string
@@ -210,58 +212,50 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
}
defer rc.Close()
var hdr string
var size int64
buf := bufio.NewReader(rc)
if hdr, size, err = FindExportData(buf); err != nil {
if size, err = FindExportData(buf); err != nil {
return
}
switch hdr {
case "$$B\n":
var data []byte
data, err = io.ReadAll(buf)
if err != nil {
break
}
// TODO(gri): allow clients of go/importer to provide a FileSet.
// Or, define a new standard go/types/gcexportdata package.
fset := token.NewFileSet()
// Select appropriate importer.
if len(data) > 0 {
switch data[0] {
case 'v', 'c', 'd':
// binary: emitted by cmd/compile till go1.10; obsolete.
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
case 'i':
// indexed: emitted by cmd/compile till go1.19;
// now used only for serializing go/types.
// See https://github.com/golang/go/issues/69491.
_, pkg, err := IImportData(fset, packages, data[1:], id)
return pkg, err
case 'u':
// unified: emitted by cmd/compile since go1.20.
_, pkg, err := UImportData(fset, packages, data[1:size], id)
return pkg, err
default:
l := len(data)
if l > 10 {
l = 10
}
return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
}
}
default:
err = fmt.Errorf("unknown export data header: %q", hdr)
var data []byte
data, err = io.ReadAll(buf)
if err != nil {
return
}
if len(data) == 0 {
return nil, fmt.Errorf("no data to load a package from for path %s", id)
}
return
// TODO(gri): allow clients of go/importer to provide a FileSet.
// Or, define a new standard go/types/gcexportdata package.
fset := token.NewFileSet()
// Select appropriate importer.
switch data[0] {
case 'v', 'c', 'd':
// binary: emitted by cmd/compile till go1.10; obsolete.
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
case 'i':
// indexed: emitted by cmd/compile till go1.19;
// now used only for serializing go/types.
// See https://github.com/golang/go/issues/69491.
_, pkg, err := IImportData(fset, packages, data[1:], id)
return pkg, err
case 'u':
// unified: emitted by cmd/compile since go1.20.
_, pkg, err := UImportData(fset, packages, data[1:size], id)
return pkg, err
default:
l := len(data)
if l > 10 {
l = 10
}
return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
}
}
type byPath []*types.Package

View File

@@ -246,6 +246,26 @@ import (
// IExportShallow encodes "shallow" export data for the specified package.
//
// For types, we use "shallow" export data. Historically, the Go
// compiler always produced a summary of the types for a given package
// that included types from other packages that it indirectly
// referenced: "deep" export data. This had the advantage that the
// compiler (and analogous tools such as gopls) need only load one
// file per direct import. However, it meant that the files tended to
// get larger based on the level of the package in the import
// graph. For example, higher-level packages in the kubernetes module
// have over 1MB of "deep" export data, even when they have almost no
// content of their own, merely because they mention a major type that
// references many others. In pathological cases the export data was
// 300x larger than the source for a package due to this quadratic
// growth.
//
// "Shallow" export data means that the serialized types describe only
// a single package. If those types mention types from other packages,
// the type checker may need to request additional packages beyond
// just the direct imports. Type information for the entire transitive
// closure of imports is provided (lazily) by the DAG.
//
// No promises are made about the encoding other than that it can be decoded by
// the same version of IExportShallow. If you plan to save export data in the
// file system, be sure to include a cryptographic digest of the executable in
@@ -268,8 +288,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc)
}
// IImportShallow decodes "shallow" types.Package data encoded by
// IExportShallow in the same executable. This function cannot import data from
// cmd/compile or gcexportdata.Write.
// [IExportShallow] in the same executable. This function cannot import data
// from cmd/compile or gcexportdata.Write.
//
// The importer calls getPackages to obtain package symbols for all
// packages mentioned in the export data, including the one being

View File

@@ -558,6 +558,14 @@ type importReader struct {
prevColumn int64
}
// markBlack is redefined in iimport_go123.go, to work around golang/go#69912.
//
// If TypeNames are not marked black (in the sense of go/types cycle
// detection), they may be mutated when dot-imported. Fix this by punching a
// hole through the type, when compiling with Go 1.23. (The bug has been fixed
// for 1.24, but the fix was not worth back-porting).
var markBlack = func(name *types.TypeName) {}
func (r *importReader) obj(name string) {
tag := r.byte()
pos := r.pos()
@@ -570,6 +578,7 @@ func (r *importReader) obj(name string) {
}
typ := r.typ()
obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams)
markBlack(obj) // workaround for golang/go#69912
r.declare(obj)
case constTag:
@@ -590,6 +599,9 @@ func (r *importReader) obj(name string) {
// declaration before recursing.
obj := types.NewTypeName(pos, r.currPkg, name, nil)
named := types.NewNamed(obj, nil, nil)
markBlack(obj) // workaround for golang/go#69912
// Declare obj before calling r.tparamList, so the new type name is recognized
// if used in the constraint of one of its own typeparams (see #48280).
r.declare(obj)

View File

@@ -0,0 +1,53 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.22 && !go1.24
package gcimporter
import (
"go/token"
"go/types"
"unsafe"
)
// TODO(rfindley): delete this workaround once go1.24 is assured.
func init() {
// Update markBlack so that it correctly sets the color
// of imported TypeNames.
//
// See the doc comment for markBlack for details.
type color uint32
const (
white color = iota
black
grey
)
type object struct {
_ *types.Scope
_ token.Pos
_ *types.Package
_ string
_ types.Type
_ uint32
color_ color
_ token.Pos
}
type typeName struct {
object
}
// If the size of types.TypeName changes, this will fail to compile.
const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{}))
var _ [-delta * delta]int
markBlack = func(obj *types.TypeName) {
type uP = unsafe.Pointer
var ptr *typeName
*(*uP)(uP(&ptr)) = uP(obj)
ptr.color_ = black
}
}

View File

@@ -27,7 +27,6 @@ import (
"unicode"
"unicode/utf8"
"golang.org/x/sync/errgroup"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/gocommand"
@@ -91,18 +90,6 @@ type ImportFix struct {
Relevance float64 // see pkg
}
// An ImportInfo represents a single import statement.
type ImportInfo struct {
ImportPath string // import path, e.g. "crypto/rand".
Name string // import name, e.g. "crand", or "" if none.
}
// A packageInfo represents what's known about a package.
type packageInfo struct {
name string // real package name, if known.
exports map[string]bool // known exports.
}
// parseOtherFiles parses all the Go files in srcDir except filename, including
// test files if filename looks like a test.
//
@@ -162,8 +149,8 @@ func addGlobals(f *ast.File, globals map[string]bool) {
// collectReferences builds a map of selector expressions, from
// left hand side (X) to a set of right hand sides (Sel).
func collectReferences(f *ast.File) references {
refs := references{}
func collectReferences(f *ast.File) References {
refs := References{}
var visitor visitFn
visitor = func(node ast.Node) ast.Visitor {
@@ -233,7 +220,7 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
allFound := true
for right := range syms {
if !pkgInfo.exports[right] {
if !pkgInfo.Exports[right] {
allFound = false
break
}
@@ -246,11 +233,6 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
return nil
}
// references is set of references found in a Go file. The first map key is the
// left hand side of a selector expression, the second key is the right hand
// side, and the value should always be true.
type references map[string]map[string]bool
// A pass contains all the inputs and state necessary to fix a file's imports.
// It can be modified in some ways during use; see comments below.
type pass struct {
@@ -258,27 +240,29 @@ type pass struct {
fset *token.FileSet // fset used to parse f and its siblings.
f *ast.File // the file being fixed.
srcDir string // the directory containing f.
env *ProcessEnv // the environment to use for go commands, etc.
loadRealPackageNames bool // if true, load package names from disk rather than guessing them.
otherFiles []*ast.File // sibling files.
logf func(string, ...any)
source Source // the environment to use for go commands, etc.
loadRealPackageNames bool // if true, load package names from disk rather than guessing them.
otherFiles []*ast.File // sibling files.
goroot string
// Intermediate state, generated by load.
existingImports map[string][]*ImportInfo
allRefs references
missingRefs references
allRefs References
missingRefs References
// Inputs to fix. These can be augmented between successive fix calls.
lastTry bool // indicates that this is the last call and fix should clean up as best it can.
candidates []*ImportInfo // candidate imports in priority order.
knownPackages map[string]*packageInfo // information about all known packages.
knownPackages map[string]*PackageInfo // information about all known packages.
}
// loadPackageNames saves the package names for everything referenced by imports.
func (p *pass) loadPackageNames(imports []*ImportInfo) error {
if p.env.Logf != nil {
p.env.Logf("loading package names for %v packages", len(imports))
func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) error {
if p.logf != nil {
p.logf("loading package names for %v packages", len(imports))
defer func() {
p.env.Logf("done loading package names for %v packages", len(imports))
p.logf("done loading package names for %v packages", len(imports))
}()
}
var unknown []string
@@ -289,20 +273,17 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error {
unknown = append(unknown, imp.ImportPath)
}
resolver, err := p.env.GetResolver()
if err != nil {
return err
}
names, err := resolver.loadPackageNames(unknown, p.srcDir)
names, err := p.source.LoadPackageNames(ctx, p.srcDir, unknown)
if err != nil {
return err
}
// TODO(rfindley): revisit this. Why do we need to store known packages with
// no exports? The inconsistent data is confusing.
for path, name := range names {
p.knownPackages[path] = &packageInfo{
name: name,
exports: map[string]bool{},
p.knownPackages[path] = &PackageInfo{
Name: name,
Exports: map[string]bool{},
}
}
return nil
@@ -330,8 +311,8 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
return imp.Name
}
known := p.knownPackages[imp.ImportPath]
if known != nil && known.name != "" {
return withoutVersion(known.name)
if known != nil && known.Name != "" {
return withoutVersion(known.Name)
}
return ImportPathToAssumedName(imp.ImportPath)
}
@@ -339,9 +320,9 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
// load reads in everything necessary to run a pass, and reports whether the
// file already has all the imports it needs. It fills in p.missingRefs with the
// file's missing symbols, if any, or removes unused imports if not.
func (p *pass) load() ([]*ImportFix, bool) {
p.knownPackages = map[string]*packageInfo{}
p.missingRefs = references{}
func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) {
p.knownPackages = map[string]*PackageInfo{}
p.missingRefs = References{}
p.existingImports = map[string][]*ImportInfo{}
// Load basic information about the file in question.
@@ -364,9 +345,11 @@ func (p *pass) load() ([]*ImportFix, bool) {
// f's imports by the identifier they introduce.
imports := collectImports(p.f)
if p.loadRealPackageNames {
err := p.loadPackageNames(append(imports, p.candidates...))
err := p.loadPackageNames(ctx, append(imports, p.candidates...))
if err != nil {
p.env.logf("loading package names: %v", err)
if p.logf != nil {
p.logf("loading package names: %v", err)
}
return nil, false
}
}
@@ -535,9 +518,10 @@ func (p *pass) assumeSiblingImportsValid() {
// We have the stdlib in memory; no need to guess.
rights = symbolNameSet(m)
}
p.addCandidate(imp, &packageInfo{
// TODO(rfindley): we should set package name here, for consistency.
p.addCandidate(imp, &PackageInfo{
// no name; we already know it.
exports: rights,
Exports: rights,
})
}
}
@@ -546,14 +530,14 @@ func (p *pass) assumeSiblingImportsValid() {
// addCandidate adds a candidate import to p, and merges in the information
// in pkg.
func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) {
p.candidates = append(p.candidates, imp)
if existing, ok := p.knownPackages[imp.ImportPath]; ok {
if existing.name == "" {
existing.name = pkg.name
if existing.Name == "" {
existing.Name = pkg.Name
}
for export := range pkg.exports {
existing.exports[export] = true
for export := range pkg.Exports {
existing.Exports[export] = true
}
} else {
p.knownPackages[imp.ImportPath] = pkg
@@ -581,19 +565,42 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P
// getFixes gets the import fixes that need to be made to f in order to fix the imports.
// It does not modify the ast.
func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) {
source, err := NewProcessEnvSource(env, filename, f.Name.Name)
if err != nil {
return nil, err
}
goEnv, err := env.goEnv()
if err != nil {
return nil, err
}
return getFixesWithSource(ctx, fset, f, filename, goEnv["GOROOT"], env.logf, source)
}
func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, goroot string, logf func(string, ...any), source Source) ([]*ImportFix, error) {
// This logic is defensively duplicated from getFixes.
abs, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
srcDir := filepath.Dir(abs)
env.logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
if logf != nil {
logf("fixImports(filename=%q), srcDir=%q ...", filename, abs, srcDir)
}
// First pass: looking only at f, and using the naive algorithm to
// derive package names from import paths, see if the file is already
// complete. We can't add any imports yet, because we don't know
// if missing references are actually package vars.
p := &pass{fset: fset, f: f, srcDir: srcDir, env: env}
if fixes, done := p.load(); done {
p := &pass{
fset: fset,
f: f,
srcDir: srcDir,
logf: logf,
goroot: goroot,
source: source,
}
if fixes, done := p.load(ctx); done {
return fixes, nil
}
@@ -605,7 +612,7 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
// Second pass: add information from other files in the same package,
// like their package vars and imports.
p.otherFiles = otherFiles
if fixes, done := p.load(); done {
if fixes, done := p.load(ctx); done {
return fixes, nil
}
@@ -618,10 +625,17 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
// Third pass: get real package names where we had previously used
// the naive algorithm.
p = &pass{fset: fset, f: f, srcDir: srcDir, env: env}
p = &pass{
fset: fset,
f: f,
srcDir: srcDir,
logf: logf,
goroot: goroot,
source: p.source, // safe to reuse, as it's just a wrapper around env
}
p.loadRealPackageNames = true
p.otherFiles = otherFiles
if fixes, done := p.load(); done {
if fixes, done := p.load(ctx); done {
return fixes, nil
}
@@ -835,7 +849,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
return true
},
dirFound: func(pkg *pkg) bool {
return pkgIsCandidate(filename, references{searchPkg: nil}, pkg)
return pkgIsCandidate(filename, References{searchPkg: nil}, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
return pkg.packageName == searchPkg
@@ -1086,11 +1100,7 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string)
return e.GocmdRunner.Run(ctx, inv)
}
func addStdlibCandidates(pass *pass, refs references) error {
goenv, err := pass.env.goEnv()
if err != nil {
return err
}
func addStdlibCandidates(pass *pass, refs References) error {
localbase := func(nm string) string {
ans := path.Base(nm)
if ans[0] == 'v' {
@@ -1105,13 +1115,13 @@ func addStdlibCandidates(pass *pass, refs references) error {
}
add := func(pkg string) {
// Prevent self-imports.
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir {
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.goroot, "src", pkg) == pass.srcDir {
return
}
exports := symbolNameSet(stdlib.PackageSymbols[pkg])
pass.addCandidate(
&ImportInfo{ImportPath: pkg},
&packageInfo{name: localbase(pkg), exports: exports})
&PackageInfo{Name: localbase(pkg), Exports: exports})
}
for left := range refs {
if left == "rand" {
@@ -1175,91 +1185,14 @@ type scanCallback struct {
exportsLoaded func(pkg *pkg, exports []stdlib.Symbol)
}
func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error {
func addExternalCandidates(ctx context.Context, pass *pass, refs References, filename string) error {
ctx, done := event.Start(ctx, "imports.addExternalCandidates")
defer done()
var mu sync.Mutex
found := make(map[string][]pkgDistance)
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true // We want everything.
},
dirFound: func(pkg *pkg) bool {
return pkgIsCandidate(filename, refs, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
if _, want := refs[pkg.packageName]; !want {
return false
}
if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName {
// The candidate is in the same directory and has the
// same package name. Don't try to import ourselves.
return false
}
if !canUse(filename, pkg.dir) {
return false
}
mu.Lock()
defer mu.Unlock()
found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)})
return false // We'll do our own loading after we sort.
},
}
resolver, err := pass.env.GetResolver()
results, err := pass.source.ResolveReferences(ctx, filename, refs)
if err != nil {
return err
}
if err = resolver.scan(ctx, callback); err != nil {
return err
}
// Search for imports matching potential package references.
type result struct {
imp *ImportInfo
pkg *packageInfo
}
results := make([]*result, len(refs))
g, ctx := errgroup.WithContext(ctx)
searcher := symbolSearcher{
logf: pass.env.logf,
srcDir: pass.srcDir,
xtest: strings.HasSuffix(pass.f.Name.Name, "_test"),
loadExports: resolver.loadExports,
}
i := 0
for pkgName, symbols := range refs {
index := i // claim an index in results
i++
pkgName := pkgName
symbols := symbols
g.Go(func() error {
found, err := searcher.search(ctx, found[pkgName], pkgName, symbols)
if err != nil {
return err
}
if found == nil {
return nil // No matching package.
}
imp := &ImportInfo{
ImportPath: found.importPathShort,
}
pkg := &packageInfo{
name: pkgName,
exports: symbols,
}
results[index] = &result{imp, pkg}
return nil
})
}
if err := g.Wait(); err != nil {
return err
}
for _, result := range results {
if result == nil {
@@ -1267,7 +1200,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
}
// Don't offer completions that would shadow predeclared
// names, such as github.com/coreos/etcd/error.
if types.Universe.Lookup(result.pkg.name) != nil { // predeclared
if types.Universe.Lookup(result.Package.Name) != nil { // predeclared
// Ideally we would skip this candidate only
// if the predeclared name is actually
// referenced by the file, but that's a lot
@@ -1276,7 +1209,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
// user before long.
continue
}
pass.addCandidate(result.imp, result.pkg)
pass.addCandidate(result.Import, result.Package)
}
return nil
}
@@ -1801,7 +1734,7 @@ func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols m
// filename is the file being formatted.
// pkgIdent is the package being searched for, like "client" (if
// searching for "client.New")
func pkgIsCandidate(filename string, refs references, pkg *pkg) bool {
func pkgIsCandidate(filename string, refs References, pkg *pkg) bool {
// Check "internal" and "vendor" visibility:
if !canUse(filename, pkg.dir) {
return false

View File

@@ -47,7 +47,14 @@ type Options struct {
// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env.
func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
fileSet := token.NewFileSet()
file, adjust, err := parse(fileSet, filename, src, opt)
var parserMode parser.Mode
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
}
file, adjust, err := parse(fileSet, filename, src, parserMode, opt.Fragment)
if err != nil {
return nil, err
}
@@ -66,17 +73,19 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e
//
// Note that filename's directory influences which imports can be chosen,
// so it is important that filename be accurate.
func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
func FixImports(ctx context.Context, filename string, src []byte, goroot string, logf func(string, ...any), source Source) (fixes []*ImportFix, err error) {
ctx, done := event.Start(ctx, "imports.FixImports")
defer done()
fileSet := token.NewFileSet()
file, _, err := parse(fileSet, filename, src, opt)
// TODO(rfindley): these default values for ParseComments and AllErrors were
// extracted from gopls, but are they even needed?
file, _, err := parse(fileSet, filename, src, parser.ParseComments|parser.AllErrors, true)
if err != nil {
return nil, err
}
return getFixes(ctx, fileSet, file, filename, opt.Env)
return getFixesWithSource(ctx, fileSet, file, filename, goroot, logf, source)
}
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
@@ -114,7 +123,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
// formatted file, and returns the postprocessed result.
func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
mergeImports(file)
sortImports(opt.LocalPrefix, fset.File(file.Pos()), file)
sortImports(opt.LocalPrefix, fset.File(file.FileStart), file)
var spacesBefore []string // import paths we need spaces before
for _, impSection := range astutil.Imports(fset, file) {
// Within each block of contiguous imports, see if any
@@ -164,13 +173,9 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori
// parse parses src, which was read from filename,
// as a Go source file or statement list.
func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
var parserMode parser.Mode // legacy ast.Object resolution is required here
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.Mode, fragment bool) (*ast.File, func(orig, src []byte) []byte, error) {
if parserMode&parser.SkipObjectResolution != 0 {
panic("legacy ast.Object resolution is required")
}
// Try as whole source file.
@@ -181,7 +186,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
// If the error is that the source file didn't begin with a
// package line and we accept fragmented input, fall through to
// try as a source fragment. Stop and return on any other error.
if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
if !fragment || !strings.Contains(err.Error(), "expected 'package'") {
return nil, nil, err
}

63
vendor/golang.org/x/tools/internal/imports/source.go generated vendored Normal file
View File

@@ -0,0 +1,63 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import "context"
// These types document the APIs below.
//
// TODO(rfindley): consider making these defined types rather than aliases.
type (
ImportPath = string
PackageName = string
Symbol = string
// References is a set of references found in a Go file. The first map key is the
// left hand side of a selector expression, the second key is the right hand
// side, and the value should always be true.
References = map[PackageName]map[Symbol]bool
)
// A Result satisfies a missing import.
//
// The Import field describes the missing import spec, and the Package field
// summarizes the package exports.
type Result struct {
Import *ImportInfo
Package *PackageInfo
}
// An ImportInfo represents a single import statement.
type ImportInfo struct {
ImportPath string // import path, e.g. "crypto/rand".
Name string // import name, e.g. "crand", or "" if none.
}
// A PackageInfo represents what's known about a package.
type PackageInfo struct {
Name string // package name in the package declaration, if known
Exports map[string]bool // set of names of known package-level symbols
}
// A Source provides imports to satisfy unresolved references in the file being
// fixed.
type Source interface {
// LoadPackageNames queries PackageName information for the requested import
// paths, when operating from the provided srcDir.
//
// TODO(rfindley): try to refactor to remove this operation.
LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error)
// ResolveReferences asks the Source for the best package name to satisfy
// each of the missing references, in the context of fixing the given
// filename.
//
// Returns a map from package name to a [Result] for that package name that
// provides the required symbols. Keys may be omitted in the map if no
// candidates satisfy all missing references for that package name. It is up
// to each data source to select the best result for each entry in the
// missing map.
ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error)
}

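To make the Source contract above concrete, here is a minimal in-memory implementation sketch; fixedSource and its tables are hypothetical and only compile from within the internal imports package. For a file that calls fmt.Println, the missing References value would look like map[string]map[string]bool{"fmt": {"Println": true}}.

package imports

import "context"

// fixedSource is a hypothetical Source backed by hard-coded tables,
// e.g. for tests.
type fixedSource struct {
	names   map[ImportPath]PackageName
	results map[PackageName]*Result
}

var _ Source = (*fixedSource)(nil)

func (s *fixedSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) {
	ans := make(map[ImportPath]PackageName)
	for _, p := range paths {
		if name, ok := s.names[p]; ok {
			ans[p] = name
		}
	}
	return ans, nil
}

func (s *fixedSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
	var ans []*Result
	for pkgName := range missing {
		if r, ok := s.results[pkgName]; ok {
			ans = append(ans, r)
		}
	}
	return ans, nil
}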
View File

@@ -0,0 +1,129 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import (
"context"
"path/filepath"
"strings"
"sync"
"golang.org/x/sync/errgroup"
"golang.org/x/tools/internal/gopathwalk"
)
// ProcessEnvSource implements the [Source] interface using the legacy
// [ProcessEnv] abstraction.
type ProcessEnvSource struct {
env *ProcessEnv
srcDir string
filename string
pkgName string
}
// NewProcessEnvSource returns a [ProcessEnvSource] wrapping the given
// env, to be used for fixing imports in the file with name filename in package
// named pkgName.
func NewProcessEnvSource(env *ProcessEnv, filename, pkgName string) (*ProcessEnvSource, error) {
abs, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
srcDir := filepath.Dir(abs)
return &ProcessEnvSource{
env: env,
srcDir: srcDir,
filename: filename,
pkgName: pkgName,
}, nil
}
func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, unknown []string) (map[string]string, error) {
r, err := s.env.GetResolver()
if err != nil {
return nil, err
}
return r.loadPackageNames(unknown, srcDir)
}
func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) {
var mu sync.Mutex
found := make(map[string][]pkgDistance)
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true // We want everything.
},
dirFound: func(pkg *pkg) bool {
return pkgIsCandidate(filename, refs, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
if _, want := refs[pkg.packageName]; !want {
return false
}
if pkg.dir == s.srcDir && s.pkgName == pkg.packageName {
// The candidate is in the same directory and has the
// same package name. Don't try to import ourselves.
return false
}
if !canUse(filename, pkg.dir) {
return false
}
mu.Lock()
defer mu.Unlock()
found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(s.srcDir, pkg.dir)})
return false // We'll do our own loading after we sort.
},
}
resolver, err := s.env.GetResolver()
if err != nil {
return nil, err
}
if err := resolver.scan(ctx, callback); err != nil {
return nil, err
}
g, ctx := errgroup.WithContext(ctx)
searcher := symbolSearcher{
logf: s.env.logf,
srcDir: s.srcDir,
xtest: strings.HasSuffix(s.pkgName, "_test"),
loadExports: resolver.loadExports,
}
var resultMu sync.Mutex
results := make(map[string]*Result, len(refs))
for pkgName, symbols := range refs {
g.Go(func() error {
found, err := searcher.search(ctx, found[pkgName], pkgName, symbols)
if err != nil {
return err
}
if found == nil {
return nil // No matching package.
}
imp := &ImportInfo{
ImportPath: found.importPathShort,
}
pkg := &PackageInfo{
Name: pkgName,
Exports: symbols,
}
resultMu.Lock()
results[pkgName] = &Result{Import: imp, Package: pkg}
resultMu.Unlock()
return nil
})
}
if err := g.Wait(); err != nil {
return nil, err
}
var ans []*Result
for _, x := range results {
ans = append(ans, x)
}
return ans, nil
}

View File

@@ -0,0 +1,103 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import (
"context"
"sync"
"time"
"golang.org/x/tools/internal/modindex"
)
// This code is here rather than in the modindex package
// to avoid import loops.
// IndexSource implements Source using the module cache index, so it is
// only used for the module cache.
//
// This is perhaps over-engineered: a new Index is read at first use,
// then Update is called every 15 minutes, and a new Index is read if the
// index changed. It is not clear the Mutex is needed.
type IndexSource struct {
modcachedir string
mutex sync.Mutex
ix *modindex.Index
expires time.Time
}
// NewIndexSource creates a new Source for the module cache in cachedir. Called from NewView in cache/session.go.
func NewIndexSource(cachedir string) *IndexSource {
return &IndexSource{modcachedir: cachedir}
}
func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) {
// This is used by goimports to resolve the package names of imports of the
// current package, which is irrelevant for the module cache.
return nil, nil
}
func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
if err := s.maybeReadIndex(); err != nil {
return nil, err
}
var cs []modindex.Candidate
for pkg, nms := range missing {
for nm := range nms {
x := s.ix.Lookup(pkg, nm, false)
cs = append(cs, x...)
}
}
found := make(map[string]*Result)
for _, c := range cs {
var x *Result
if x = found[c.ImportPath]; x == nil {
x = &Result{
Import: &ImportInfo{
ImportPath: c.ImportPath,
Name: "",
},
Package: &PackageInfo{
Name: c.PkgName,
Exports: make(map[string]bool),
},
}
found[c.ImportPath] = x
}
x.Package.Exports[c.Name] = true
}
var ans []*Result
for _, x := range found {
ans = append(ans, x)
}
return ans, nil
}
func (s *IndexSource) maybeReadIndex() error {
s.mutex.Lock()
defer s.mutex.Unlock()
var readIndex bool
if time.Now().After(s.expires) {
ok, err := modindex.Update(s.modcachedir)
if err != nil {
return err
}
if ok {
readIndex = true
}
}
if readIndex || s.ix == nil {
ix, err := modindex.ReadIndex(s.modcachedir)
if err != nil {
return err
}
s.ix = ix
// for now refresh every 15 minutes
s.expires = time.Now().Add(time.Minute * 15)
}
return nil
}

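A hedged usage sketch for IndexSource above; the module cache path, file name, and missing-reference map are assumptions, and the helper only compiles from within the internal imports package.

package imports

import (
	"context"
	"fmt"
)

// resolveWithIndex is a hypothetical caller: construct an IndexSource for a
// module cache directory once, then ask it to satisfy missing selector
// references such as yaml.Marshal.
func resolveWithIndex(ctx context.Context, modcachedir string) error {
	source := NewIndexSource(modcachedir)
	missing := References{"yaml": {"Marshal": true}}
	results, err := source.ResolveReferences(ctx, "example.go", missing)
	if err != nil {
		return err
	}
	for _, r := range results {
		fmt.Printf("import %q provides package %s\n", r.Import.ImportPath, r.Package.Name)
	}
	return nil
}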
View File

@@ -0,0 +1,135 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"fmt"
"log"
"os"
"path/filepath"
"regexp"
"slices"
"strings"
"sync"
"time"
"golang.org/x/mod/semver"
"golang.org/x/tools/internal/gopathwalk"
)
type directory struct {
path Relpath
importPath string
version string // semantic version
syms []symbol
}
// byImportPath groups the directories by import path,
// sorting the ones with the same import path by semantic version,
// most recent first.
func byImportPath(dirs []Relpath) (map[string][]*directory, error) {
ans := make(map[string][]*directory) // key is import path
for _, d := range dirs {
ip, sv, err := DirToImportPathVersion(d)
if err != nil {
return nil, err
}
ans[ip] = append(ans[ip], &directory{
path: d,
importPath: ip,
version: sv,
})
}
for k, v := range ans {
semanticSort(v)
ans[k] = v
}
return ans, nil
}
// sort the directories by semantic version, latest first
func semanticSort(v []*directory) {
slices.SortFunc(v, func(l, r *directory) int {
if n := semver.Compare(l.version, r.version); n != 0 {
return -n // latest first
}
return strings.Compare(string(l.path), string(r.path))
})
}
// modCacheRegexp splits a relative path into module, module version, and package.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
// DirToImportPathVersion computes import path and semantic version
func DirToImportPathVersion(dir Relpath) (string, string, error) {
m := modCacheRegexp.FindStringSubmatch(string(dir))
// m[1] is the module path
// m[2] is the version major.minor.patch(-<pre-release identifier>)
// m[3] is the rest of the package path
if len(m) != 4 {
return "", "", fmt.Errorf("bad dir %s", dir)
}
if !semver.IsValid(m[2]) {
return "", "", fmt.Errorf("bad semantic version %s", m[2])
}
// ToSlash is required for Windows.
return filepath.ToSlash(m[1] + m[3]), m[2], nil
}
// A region controls which directories to look at, for
// updating the index incrementally, and for testing that.
// (For testing, one builds an index as of A, incrementally
// updates it to B, and compares the result to an index built
// as of B.)
type region struct {
onlyAfter, onlyBefore time.Time
sync.Mutex
ans []Relpath
}
func findDirs(root string, onlyAfter, onlyBefore time.Time) []Relpath {
roots := []gopathwalk.Root{{Path: root, Type: gopathwalk.RootModuleCache}}
// TODO(PJW): adjust concurrency
opts := gopathwalk.Options{ModulesEnabled: true, Concurrency: 1 /* ,Logf: log.Printf*/}
betw := &region{
onlyAfter: onlyAfter,
onlyBefore: onlyBefore,
}
gopathwalk.WalkSkip(roots, betw.addDir, betw.skipDir, opts)
return betw.ans
}
func (r *region) addDir(rt gopathwalk.Root, dir string) {
// do we need to check times?
r.Lock()
defer r.Unlock()
x := filepath.ToSlash(string(toRelpath(Abspath(rt.Path), dir)))
r.ans = append(r.ans, toRelpath(Abspath(rt.Path), x))
}
func (r *region) skipDir(_ gopathwalk.Root, dir string) bool {
// The cache directory is already ignored in gopathwalk.
if filepath.Base(dir) == "internal" {
return true
}
if strings.Contains(dir, "toolchain@") {
return true
}
// don't look inside @ directories that are too old
if strings.Contains(filepath.Base(dir), "@") {
st, err := os.Stat(dir)
if err != nil {
log.Printf("can't stat dir %s %v", dir, err)
return true
}
if st.ModTime().Before(r.onlyAfter) {
return true
}
if st.ModTime().After(r.onlyBefore) {
return true
}
}
return false
}

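A small worked sketch of DirToImportPathVersion above, reusing the yaml module from the index documentation; the helper is hypothetical and assumes Relpath is a string-based type, as its other uses in this file suggest.

package modindex

import "fmt"

// exampleDirToImportPathVersion splits a module cache directory name into
// its import path and semantic version.
func exampleDirToImportPathVersion() {
	dir := Relpath("gopkg.in/yaml.v1@v1.0.0-20140924161607-9f9df34309c0")
	ip, ver, err := DirToImportPathVersion(dir)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(ip)  // gopkg.in/yaml.v1
	fmt.Println(ver) // v1.0.0-20140924161607-9f9df34309c0
}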
262
vendor/golang.org/x/tools/internal/modindex/index.go generated vendored Normal file
View File

@@ -0,0 +1,262 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"bufio"
"encoding/csv"
"errors"
"fmt"
"hash/crc64"
"io"
"io/fs"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"time"
)
/*
The on-disk index is a text file.
The first 3 lines are header information containing CurrentVersion,
the value of GOMODCACHE, and the validity date of the index.
(This is when the code started building the index.)
Following the header are sections of lines, one section for each
import path. These sections are sorted by package name.
The first line of each section, marked by a leading :, contains
the package name, the import path, the name of the directory relative
to GOMODCACHE, and its semantic version.
The rest of each section consists of one line per exported symbol.
The lines are sorted by the symbol's name and contain the name,
an indication of its lexical type (C, T, V, F), and if it is the
name of a function, information about the signature.
The fields in the section header lines are separated by commas, and
in the unlikely event this would be confusing, the csv package is used
to write (and read) them.
In the lines containing exported names, C=const, V=var, T=type, F=func.
If it is a func, the next field is the number of returned values,
followed by pairs consisting of formal parameter names and types.
All these fields are separated by spaces. Any spaces in a type
(e.g., chan struct{}) are replaced by $s on the disk. The $s are
turned back into spaces when read.
Here is an index header (the comments are not part of the index):
0 // version (of the index format)
/usr/local/google/home/pjw/go/pkg/mod // GOMODCACHE
2024-09-11 18:55:09 // validity date of the index
Here is an index section:
:yaml,gopkg.in/yaml.v1,gopkg.in/yaml.v1@v1.0.0-20140924161607-9f9df34309c0,v1.0.0-20140924161607-9f9df34309c0
Getter T
Marshal F 2 in interface{}
Setter T
Unmarshal F 1 in []byte out interface{}
The package name is yaml, the import path is gopkg.in/yaml.v1.
Getter and Setter are types, and Marshal and Unmarshal are functions.
The latter returns one value and has two arguments, 'in' and 'out'
whose types are []byte and interface{}.
*/
// CurrentVersion tells readers about the format of the index.
const CurrentVersion int = 0
// Index is returned by ReadIndex().
type Index struct {
Version int
Cachedir Abspath // The directory containing the module cache
Changed time.Time // The index is up to date as of Changed
Entries []Entry
}
// An Entry contains information for an import path.
type Entry struct {
Dir Relpath // directory in modcache
ImportPath string
PkgName string
Version string
//ModTime STime // is this useful?
Names []string // exported names and information
}
// ReadIndex reads the latest version of the on-disk index
// for the cache directory cd.
// It returns (nil, nil) if there is no index, but returns
// a non-nil error if the index exists but could not be read.
func ReadIndex(cachedir string) (*Index, error) {
cachedir, err := filepath.Abs(cachedir)
if err != nil {
return nil, err
}
cd := Abspath(cachedir)
dir, err := IndexDir()
if err != nil {
return nil, err
}
base := indexNameBase(cd)
iname := filepath.Join(dir, base)
buf, err := os.ReadFile(iname)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return nil, nil
}
return nil, fmt.Errorf("cannot read %s: %w", iname, err)
}
fname := filepath.Join(dir, string(buf))
fd, err := os.Open(fname)
if err != nil {
return nil, err
}
defer fd.Close()
r := bufio.NewReader(fd)
ix, err := readIndexFrom(cd, r)
if err != nil {
return nil, err
}
return ix, nil
}
func readIndexFrom(cd Abspath, bx io.Reader) (*Index, error) {
b := bufio.NewScanner(bx)
var ans Index
// header
ok := b.Scan()
if !ok {
return nil, fmt.Errorf("unexpected scan error")
}
l := b.Text()
var err error
ans.Version, err = strconv.Atoi(l)
if err != nil {
return nil, err
}
if ans.Version != CurrentVersion {
return nil, fmt.Errorf("got version %d, expected %d", ans.Version, CurrentVersion)
}
if ok := b.Scan(); !ok {
return nil, fmt.Errorf("scanner error reading cachedir")
}
ans.Cachedir = Abspath(b.Text())
if ok := b.Scan(); !ok {
return nil, fmt.Errorf("scanner error reading index creation time")
}
// TODO(pjw): need to check that this is the expected cachedir
// so the tag should be passed in to this function
ans.Changed, err = time.ParseInLocation(time.DateTime, b.Text(), time.Local)
if err != nil {
return nil, err
}
var curEntry *Entry
for b.Scan() {
v := b.Text()
if v[0] == ':' {
if curEntry != nil {
ans.Entries = append(ans.Entries, *curEntry)
}
// as directories may contain commas and quotes, they need to be read as csv.
rdr := strings.NewReader(v[1:])
cs := csv.NewReader(rdr)
flds, err := cs.Read()
if err != nil {
return nil, err
}
if len(flds) != 4 {
return nil, fmt.Errorf("header contains %d fields, not 4: %q", len(v), v)
}
curEntry = &Entry{PkgName: flds[0], ImportPath: flds[1], Dir: toRelpath(cd, flds[2]), Version: flds[3]}
continue
}
curEntry.Names = append(curEntry.Names, v)
}
if curEntry != nil {
ans.Entries = append(ans.Entries, *curEntry)
}
if err := b.Err(); err != nil {
return nil, fmt.Errorf("scanner failed %v", err)
}
return &ans, nil
}
// write the index as a text file
func writeIndex(cachedir Abspath, ix *Index) error {
dir, err := IndexDir()
if err != nil {
return err
}
ipat := fmt.Sprintf("index-%d-*", CurrentVersion)
fd, err := os.CreateTemp(dir, ipat)
if err != nil {
return err // can this happen?
}
defer fd.Close()
if err := writeIndexToFile(ix, fd); err != nil {
return err
}
content := fd.Name()
content = filepath.Base(content)
base := indexNameBase(cachedir)
nm := filepath.Join(dir, base)
err = os.WriteFile(nm, []byte(content), 0666)
if err != nil {
return err
}
return nil
}
func writeIndexToFile(x *Index, fd *os.File) error {
cnt := 0
w := bufio.NewWriter(fd)
fmt.Fprintf(w, "%d\n", x.Version)
fmt.Fprintf(w, "%s\n", x.Cachedir)
// round the time down
tm := x.Changed.Add(-time.Second / 2)
fmt.Fprintf(w, "%s\n", tm.Format(time.DateTime))
for _, e := range x.Entries {
if e.ImportPath == "" {
continue // shouldn't happen
}
// PJW: maybe always write these headers as csv?
if strings.ContainsAny(string(e.Dir), ",\"") {
log.Printf("DIR: %s", e.Dir)
cw := csv.NewWriter(w)
cw.Write([]string{":" + e.PkgName, e.ImportPath, string(e.Dir), e.Version})
cw.Flush()
} else {
fmt.Fprintf(w, ":%s,%s,%s,%s\n", e.PkgName, e.ImportPath, e.Dir, e.Version)
}
for _, x := range e.Names {
fmt.Fprintf(w, "%s\n", x)
cnt++
}
}
if err := w.Flush(); err != nil {
return err
}
return nil
}
// tests can override this
var IndexDir = indexDir
// indexDir computes the directory containing the index
func indexDir() (string, error) {
dir, err := os.UserCacheDir()
if err != nil {
return "", fmt.Errorf("cannot open UserCacheDir, %w", err)
}
return filepath.Join(dir, "go", "imports"), nil
}
// return the base name of the file containing the name of the current index
func indexNameBase(cachedir Abspath) string {
// crc64 is a way to convert path names into 16 hex digits.
h := crc64.Checksum([]byte(cachedir), crc64.MakeTable(crc64.ECMA))
fname := fmt.Sprintf("index-name-%d-%016x", CurrentVersion, h)
return fname
}

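To tie the on-disk format described above to the in-memory Index, here is a hedged sketch that feeds the documented example section through readIndexFrom; the header values (version, GOMODCACHE path, timestamp) are assumptions chosen to satisfy the parser.

package modindex

import (
	"fmt"
	"strings"
)

// exampleReadIndex parses a tiny index in the documented text format and
// prints fields of the resulting entry.
func exampleReadIndex() error {
	const raw = `0
/home/user/go/pkg/mod
2024-09-11 18:55:09
:yaml,gopkg.in/yaml.v1,gopkg.in/yaml.v1@v1.0.0-20140924161607-9f9df34309c0,v1.0.0-20140924161607-9f9df34309c0
Getter T
Marshal F 2 in interface{}
Setter T
Unmarshal F 1 in []byte out interface{}
`
	ix, err := readIndexFrom(Abspath("/home/user/go/pkg/mod"), strings.NewReader(raw))
	if err != nil {
		return err
	}
	e := ix.Entries[0]
	fmt.Println(e.PkgName, e.ImportPath, e.Version, len(e.Names)) // yaml gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 4
	return nil
}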
145
vendor/golang.org/x/tools/internal/modindex/lookup.go generated vendored Normal file
View File

@@ -0,0 +1,145 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"slices"
"strconv"
"strings"
)
type Candidate struct {
PkgName string
Name string
Dir string
ImportPath string
Type LexType
// information for Funcs
Results int16 // how many results
Sig []Field // arg names and types
}
type Field struct {
Arg, Type string
}
type LexType int8
const (
Const LexType = iota
Var
Type
Func
)
// Lookup finds all the symbols in the index with the given PkgName and name.
// If prefix is true, it finds all of these with name as a prefix.
func (ix *Index) Lookup(pkg, name string, prefix bool) []Candidate {
loc, ok := slices.BinarySearchFunc(ix.Entries, pkg, func(e Entry, pkg string) int {
return strings.Compare(e.PkgName, pkg)
})
if !ok {
return nil // didn't find the package
}
var ans []Candidate
// loc is the first entry for this package name, but there may be several
for i := loc; i < len(ix.Entries); i++ {
e := ix.Entries[i]
if e.PkgName != pkg {
break // end of sorted package names
}
nloc, ok := slices.BinarySearchFunc(e.Names, name, func(s string, name string) int {
if strings.HasPrefix(s, name) {
return 0
}
if s < name {
return -1
}
return 1
})
if !ok {
continue // didn't find the name, nor any symbols with name as a prefix
}
for j := nloc; j < len(e.Names); j++ {
nstr := e.Names[j]
// benchmarks show this makes a difference when there are a lot of possibilities
flds := fastSplit(nstr)
if !(flds[0] == name || prefix && strings.HasPrefix(flds[0], name)) {
// past range of matching Names
break
}
if len(flds) < 2 {
continue // should never happen
}
px := Candidate{
PkgName: pkg,
Name: flds[0],
Dir: string(e.Dir),
ImportPath: e.ImportPath,
Type: asLexType(flds[1][0]),
}
if flds[1] == "F" {
n, err := strconv.Atoi(flds[2])
if err != nil {
continue // should never happen
}
px.Results = int16(n)
if len(flds) >= 4 {
sig := strings.Split(flds[3], " ")
for i := 0; i < len(sig); i++ {
// $ cannot otherwise occur. removing the spaces
// almost works, but for chan struct{}, e.g.
sig[i] = strings.Replace(sig[i], "$", " ", -1)
}
px.Sig = toFields(sig)
}
}
ans = append(ans, px)
}
}
return ans
}
func toFields(sig []string) []Field {
ans := make([]Field, len(sig)/2)
for i := 0; i < len(ans); i++ {
ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
}
return ans
}
// benchmarks show this is measurably better than strings.Split
func fastSplit(x string) []string {
ans := make([]string, 0, 4)
nxt := 0
start := 0
for i := 0; i < len(x); i++ {
if x[i] != ' ' {
continue
}
ans = append(ans, x[start:i])
nxt++
start = i + 1
if nxt >= 3 {
break
}
}
ans = append(ans, x[start:])
return ans
}
func asLexType(c byte) LexType {
switch c {
case 'C':
return Const
case 'V':
return Var
case 'T':
return Type
case 'F':
return Func
}
return -1
}

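A brief sketch of Lookup above, continuing the yaml example from the index documentation; obtaining ix (for instance via ReadIndex) is assumed.

package modindex

import "fmt"

// exampleLookup asks the index which packages named "yaml" export a symbol
// named exactly "Marshal".
func exampleLookup(ix *Index) {
	for _, c := range ix.Lookup("yaml", "Marshal", false) {
		// For the documented example entry this yields one candidate with
		// ImportPath "gopkg.in/yaml.v1", Type Func, Results 2, and
		// Sig []Field{{Arg: "in", Type: "interface{}"}}.
		fmt.Println(c.ImportPath, c.Name, c.Type == Func, c.Results)
	}
}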
164
vendor/golang.org/x/tools/internal/modindex/modindex.go generated vendored Normal file
View File

@@ -0,0 +1,164 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package modindex contains code for building and searching an index to
// the Go module cache. The directory containing the index, returned by
// IndexDir(), contains a file index-name-<ver> that contains the name
// of the current index. We believe writing that short file is atomic.
// ReadIndex reads that file to get the file name of the index.
// WriteIndex writes an index with a unique name and then
// writes that name into a new version of index-name-<ver>.
// (<ver> stands for the CurrentVersion of the index format.)
package modindex
import (
"path/filepath"
"slices"
"strings"
"time"
"golang.org/x/mod/semver"
)
// Create always creates a new index for the go module cache that is in cachedir.
func Create(cachedir string) error {
_, err := indexModCache(cachedir, true)
return err
}
// Update the index for the go module cache that is in cachedir.
// If there is no existing index it will build one.
// If there are changed directories since the last index, it will
// write a new one and return true. Otherwise it returns false.
func Update(cachedir string) (bool, error) {
return indexModCache(cachedir, false)
}
// indexModCache writes an index current as of when it is called.
// If clear is true the index is constructed from all of GOMODCACHE
// otherwise the index is constructed from the last previous index
// and the updates to the cache. It returns true if it wrote an index,
// false otherwise.
func indexModCache(cachedir string, clear bool) (bool, error) {
cachedir, err := filepath.Abs(cachedir)
if err != nil {
return false, err
}
cd := Abspath(cachedir)
future := time.Now().Add(24 * time.Hour) // safely in the future
ok, err := modindexTimed(future, cd, clear)
if err != nil {
return false, err
}
return ok, nil
}
// modindexTimed writes an index current as of onlyBefore.
// If clear is true the index is constructed from all of GOMODCACHE
// otherwise the index is constructed from the last previous index
// and all the updates to the cache before onlyBefore.
// It returns true if it wrote a new index, false if it wrote nothing.
func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
var curIndex *Index
if !clear {
var err error
curIndex, err = ReadIndex(string(cachedir))
if clear && err != nil {
return false, err
}
// TODO(pjw): check that most of those directories still exist
}
cfg := &work{
onlyBefore: onlyBefore,
oldIndex: curIndex,
cacheDir: cachedir,
}
if curIndex != nil {
cfg.onlyAfter = curIndex.Changed
}
if err := cfg.buildIndex(); err != nil {
return false, err
}
if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
// no changes from existing curIndex, don't write a new index
return false, nil
}
if err := cfg.writeIndex(); err != nil {
return false, err
}
return true, nil
}
type work struct {
onlyBefore time.Time // do not use directories later than this
onlyAfter time.Time // only interested in directories after this
// directories from before onlyAfter come from oldIndex
oldIndex *Index
newIndex *Index
cacheDir Abspath
}
func (w *work) buildIndex() error {
// The effective date of the new index should be at least
// slightly earlier than when the directories are scanned
// so set it now.
w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
if len(dirs) == 0 {
return nil
}
newdirs, err := byImportPath(dirs)
if err != nil {
return err
}
// Each import path might occur only in newdirs,
// only in w.oldIndex, or in both.
// If it occurs in both, use the semantically later one.
if w.oldIndex != nil {
for _, e := range w.oldIndex.Entries {
found, ok := newdirs[e.ImportPath]
if !ok {
w.newIndex.Entries = append(w.newIndex.Entries, e)
continue // use this one, there is no new one
}
if semver.Compare(found[0].version, e.Version) > 0 {
// use the new one
} else {
// use the old one, forget the new one
w.newIndex.Entries = append(w.newIndex.Entries, e)
delete(newdirs, e.ImportPath)
}
}
}
// get symbol information for all the new directories
getSymbols(w.cacheDir, newdirs)
// assemble the new index entries
for k, v := range newdirs {
d := v[0]
pkg, names := processSyms(d.syms)
if pkg == "" {
continue // PJW: does this ever happen?
}
entry := Entry{
PkgName: pkg,
Dir: d.path,
ImportPath: k,
Version: d.version,
Names: names,
}
w.newIndex.Entries = append(w.newIndex.Entries, entry)
}
// sort the entries in the new index
slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
return n
}
return strings.Compare(l.ImportPath, r.ImportPath)
})
return nil
}
func (w *work) writeIndex() error {
return writeIndex(w.cacheDir, w.newIndex)
}

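An end-to-end sketch of the workflow in the package comment above; the GOMODCACHE argument and the use of log.Fatal are assumptions.

package modindex

import (
	"fmt"
	"log"
)

// exampleWorkflow updates (or creates) the index for a module cache and
// then reads it back for lookups.
func exampleWorkflow(gomodcache string) {
	wrote, err := Update(gomodcache) // builds a full index if none exists yet
	if err != nil {
		log.Fatal(err)
	}
	ix, err := ReadIndex(gomodcache)
	if err != nil {
		log.Fatal(err)
	}
	if ix == nil {
		log.Fatal("no index found")
	}
	fmt.Println("rewrote index:", wrote, "entries:", len(ix.Entries))
}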
189
vendor/golang.org/x/tools/internal/modindex/symbols.go generated vendored Normal file
View File

@@ -0,0 +1,189 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
"go/types"
"os"
"path/filepath"
"slices"
"strings"
"golang.org/x/sync/errgroup"
)
// The name of a symbol contains information about the symbol:
// <name> T for types
// <name> C for consts
// <name> V for vars
// and for funcs: <name> F <num of return values> (<arg-name> <arg-type>)*
// any spaces in <arg-type> are replaced by $s so that the fields
// of the name are space separated
type symbol struct {
pkg string // name of the symbol's package
name string // declared name
kind string // T, C, V, or F
sig string // signature information, for F
}
// find the symbols for the best directories
func getSymbols(cd Abspath, dirs map[string][]*directory) {
var g errgroup.Group
g.SetLimit(-1) // maybe throttle this some day
for _, vv := range dirs {
// throttling some day?
d := vv[0]
g.Go(func() error {
thedir := filepath.Join(string(cd), string(d.path))
mode := parser.SkipObjectResolution
fi, err := os.ReadDir(thedir)
if err != nil {
return nil // log this someday?
}
for _, fx := range fi {
if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
continue
}
fname := filepath.Join(thedir, fx.Name())
tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
if err != nil {
continue // ignore errors, someday log them?
}
d.syms = append(d.syms, getFileExports(tr)...)
}
return nil
})
}
g.Wait()
}
func getFileExports(f *ast.File) []symbol {
pkg := f.Name.Name
if pkg == "main" {
return nil
}
var ans []symbol
// should we look for //go:build ignore?
for _, decl := range f.Decls {
switch decl := decl.(type) {
case *ast.FuncDecl:
if decl.Recv != nil {
// ignore methods, as we are completing package selections
continue
}
name := decl.Name.Name
dtype := decl.Type
// not looking at dtype.TypeParams. That is, treating
// generic functions just like non-generic ones.
sig := dtype.Params
kind := "F"
result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
for _, x := range sig.List {
// This code creates a string representing the type.
// TODO(pjw): it may be fragile:
// 1. x.Type could be nil, perhaps in ill-formed code
// 2. ExprString might someday change incompatibly to
// include struct tags, which can be arbitrary strings
if x.Type == nil {
// Can this happen without a parse error? (Files with parse
// errors are ignored in getSymbols)
continue // maybe report this someday
}
tp := types.ExprString(x.Type)
if len(tp) == 0 {
// Can this happen?
continue // maybe report this someday
}
// This is only safe if ExprString never returns anything with a $
// The only place a $ can occur seems to be in a struct tag, which
// can be an arbitrary string literal, and ExprString does not presently
// print struct tags. So for this to happen the type of a formal parameter
// has to be an explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
// would have to show the struct tag. Even testing for this case seems
// a waste of effort, but let's not ignore such pathologies.
if strings.Contains(tp, "$") {
continue
}
tp = strings.Replace(tp, " ", "$", -1)
if len(x.Names) == 0 {
result = append(result, "_")
result = append(result, tp)
} else {
for _, y := range x.Names {
result = append(result, y.Name)
result = append(result, tp)
}
}
}
sigs := strings.Join(result, " ")
if s := newsym(pkg, name, kind, sigs); s != nil {
ans = append(ans, *s)
}
case *ast.GenDecl:
switch decl.Tok {
case token.CONST, token.VAR:
tp := "V"
if decl.Tok == token.CONST {
tp = "C"
}
for _, sp := range decl.Specs {
for _, x := range sp.(*ast.ValueSpec).Names {
if s := newsym(pkg, x.Name, tp, ""); s != nil {
ans = append(ans, *s)
}
}
}
case token.TYPE:
for _, sp := range decl.Specs {
if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, "T", ""); s != nil {
ans = append(ans, *s)
}
}
}
}
}
return ans
}
func newsym(pkg, name, kind, sig string) *symbol {
if len(name) == 0 || !ast.IsExported(name) {
return nil
}
sym := symbol{pkg: pkg, name: name, kind: kind, sig: sig}
return &sym
}
// processSyms returns the package name and the encoded strings for the symbols.
// If there are multiple packages, it chooses one arbitrarily.
// The returned slice is sorted lexicographically.
func processSyms(syms []symbol) (string, []string) {
if len(syms) == 0 {
return "", nil
}
slices.SortFunc(syms, func(l, r symbol) int {
return strings.Compare(l.name, r.name)
})
pkg := syms[0].pkg
var names []string
for _, s := range syms {
var nx string
if s.pkg == pkg {
if s.sig != "" {
nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
} else {
nx = fmt.Sprintf("%s %s", s.name, s.kind)
}
names = append(names, nx)
} else {
continue // PJW: do we want to keep track of these?
}
}
return pkg, names
}

25
vendor/golang.org/x/tools/internal/modindex/types.go generated vendored Normal file
View File

@@ -0,0 +1,25 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package modindex
import (
"strings"
)
// Special string types to distinguish the various kinds of directory names,
// as it is easy to get them confused.
type Abspath string // absolute paths
type Relpath string // paths with GOMODCACHE prefix removed
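// For illustration (not in the upstream source): with cachedir
// "/home/user/go/pkg/mod", toRelpath maps
// "/home/user/go/pkg/mod/golang.org/x/mod@v0.22.0" to
// Relpath("golang.org/x/mod@v0.22.0") and leaves other strings unchanged.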
func toRelpath(cachedir Abspath, s string) Relpath {
if strings.HasPrefix(s, string(cachedir)) {
if s == string(cachedir) {
return Relpath("")
}
return Relpath(s[len(cachedir)+1:])
}
return Relpath(s)
}

View File

@@ -5,7 +5,6 @@
// Package packagesinternal exposes internal-only fields from go/packages.
package packagesinternal
var GetForTest = func(p interface{}) string { return "" }
var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
type PackageError struct {
@@ -16,7 +15,6 @@ type PackageError struct {
var TypecheckCgo int
var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
var ForTest int // must be set as a LoadMode to call GetForTest
var SetModFlag = func(config interface{}, value string) {}
var SetModFile = func(config interface{}, value string) {}

View File

@@ -6,6 +6,8 @@ package typeparams
import (
"go/types"
"golang.org/x/tools/internal/aliases"
)
// Free is a memoization of the set of free type parameters within a
@@ -36,6 +38,18 @@ func (w *Free) Has(typ types.Type) (res bool) {
break
case *types.Alias:
if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() {
return true // This is an uninstantiated Alias.
}
// The expansion of an alias can have free type parameters,
// whether or not the alias itself has type parameters:
//
// func _[K comparable]() {
// type Set = map[K]bool // free(Set) = {K}
// type MapTo[V] = map[K]V // free(Map[foo]) = {V}
// }
//
// So, we must Unalias.
return w.Has(types.Unalias(t))
case *types.Array:
@@ -96,9 +110,8 @@ func (w *Free) Has(typ types.Type) (res bool) {
case *types.Named:
args := t.TypeArgs()
// TODO(taking): this does not match go/types/infer.go. Check with rfindley.
if params := t.TypeParams(); params.Len() > args.Len() {
return true
return true // this is an uninstantiated named type.
}
for i, n := 0, args.Len(); i < n; i++ {
if w.Has(args.At(i)) {

View File

@@ -11,6 +11,8 @@ import (
"go/types"
"reflect"
"unsafe"
"golang.org/x/tools/internal/aliases"
)
func SetUsesCgo(conf *types.Config) bool {
@@ -63,3 +65,57 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
return other.Name()
}
}
// A NamedOrAlias is a [types.Type] that is named (as
// defined by the spec) and capable of bearing type parameters: it
// abstracts aliases ([types.Alias]) and defined types
// ([types.Named]).
//
// Every type declared by an explicit "type" declaration is a
// NamedOrAlias. (Built-in type symbols may additionally
// have type [types.Basic], which is not a NamedOrAlias,
// though the spec regards them as "named".)
//
// NamedOrAlias cannot expose the Origin method, because
// [types.Alias.Origin] and [types.Named.Origin] have different
// (covariant) result types; use [Origin] instead.
type NamedOrAlias interface {
types.Type
Obj() *types.TypeName
}
// TypeParams is a light shim around t.TypeParams().
// (go/types.Alias).TypeParams requires >= 1.23.
func TypeParams(t NamedOrAlias) *types.TypeParamList {
switch t := t.(type) {
case *types.Alias:
return aliases.TypeParams(t)
case *types.Named:
return t.TypeParams()
}
return nil
}
// TypeArgs is a light shim around t.TypeArgs().
// (go/types.Alias).TypeArgs requires >= 1.23.
func TypeArgs(t NamedOrAlias) *types.TypeList {
switch t := t.(type) {
case *types.Alias:
return aliases.TypeArgs(t)
case *types.Named:
return t.TypeArgs()
}
return nil
}
// Origin returns the generic type of the Named or Alias type t if it
// is instantiated, otherwise it returns t.
func Origin(t NamedOrAlias) NamedOrAlias {
switch t := t.(type) {
case *types.Alias:
return aliases.Origin(t)
case *types.Named:
return t.Origin()
}
return t
}

View File

@@ -0,0 +1,282 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typesinternal
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"strconv"
"strings"
)
// ZeroString returns the string representation of the "zero" value of the type t.
// This string can be used on the right-hand side of an assignment where the
// left-hand side has that explicit type.
// Exception: This does not apply to tuples. Their string representation is
// informational only and cannot be used in an assignment.
// When assigning to a wider type (such as 'any'), it's the caller's
// responsibility to handle any necessary type conversions.
// See [ZeroExpr] for a variant that returns an [ast.Expr].
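// Illustrative examples (not part of the upstream doc comment): ZeroString
// yields "0" for int, `""` for string, "nil" for *T, and "S{}" for a named
// struct or array type S.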
func ZeroString(t types.Type, qf types.Qualifier) string {
switch t := t.(type) {
case *types.Basic:
switch {
case t.Info()&types.IsBoolean != 0:
return "false"
case t.Info()&types.IsNumeric != 0:
return "0"
case t.Info()&types.IsString != 0:
return `""`
case t.Kind() == types.UnsafePointer:
fallthrough
case t.Kind() == types.UntypedNil:
return "nil"
default:
panic(fmt.Sprint("ZeroString for unexpected type:", t))
}
case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
return "nil"
case *types.Named, *types.Alias:
switch under := t.Underlying().(type) {
case *types.Struct, *types.Array:
return types.TypeString(t, qf) + "{}"
default:
return ZeroString(under, qf)
}
case *types.Array, *types.Struct:
return types.TypeString(t, qf) + "{}"
case *types.TypeParam:
// Assumes func new is not shadowed.
return "*new(" + types.TypeString(t, qf) + ")"
case *types.Tuple:
// Tuples are not normal values.
// We currently format them as "(t[0], ..., t[n])". Could be something else.
components := make([]string, t.Len())
for i := 0; i < t.Len(); i++ {
components[i] = ZeroString(t.At(i).Type(), qf)
}
return "(" + strings.Join(components, ", ") + ")"
case *types.Union:
// Variables of these types cannot be created, so it makes
// no sense to ask for their zero value.
panic(fmt.Sprintf("invalid type for a variable: %v", t))
default:
panic(t) // unreachable.
}
}
// ZeroExpr returns the ast.Expr representation of the "zero" value of the type t.
// ZeroExpr is defined for types that are suitable for variables.
// It may panic for other types such as Tuple or Union.
// See [ZeroString] for a variant that returns a string.
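// Illustrative example (not part of the upstream doc comment): for a map or
// pointer type ZeroExpr returns ast.NewIdent("nil"), and for a named struct
// type S it returns an *ast.CompositeLit whose syntax is S{}.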
func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
switch t := typ.(type) {
case *types.Basic:
switch {
case t.Info()&types.IsBoolean != 0:
return &ast.Ident{Name: "false"}
case t.Info()&types.IsNumeric != 0:
return &ast.BasicLit{Kind: token.INT, Value: "0"}
case t.Info()&types.IsString != 0:
return &ast.BasicLit{Kind: token.STRING, Value: `""`}
case t.Kind() == types.UnsafePointer:
fallthrough
case t.Kind() == types.UntypedNil:
return ast.NewIdent("nil")
default:
panic(fmt.Sprint("ZeroExpr for unexpected type:", t))
}
case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
return ast.NewIdent("nil")
case *types.Named, *types.Alias:
switch under := t.Underlying().(type) {
case *types.Struct, *types.Array:
return &ast.CompositeLit{
Type: TypeExpr(f, pkg, typ),
}
default:
return ZeroExpr(f, pkg, under)
}
case *types.Array, *types.Struct:
return &ast.CompositeLit{
Type: TypeExpr(f, pkg, typ),
}
case *types.TypeParam:
return &ast.StarExpr{ // *new(T)
X: &ast.CallExpr{
// Assumes func new is not shadowed.
Fun: ast.NewIdent("new"),
Args: []ast.Expr{
ast.NewIdent(t.Obj().Name()),
},
},
}
case *types.Tuple:
// Unlike ZeroString, there is no ast.Expr that can express a tuple as
// "(t[0], ..., t[n])".
panic(fmt.Sprintf("invalid type for a variable: %v", t))
case *types.Union:
// Variables of these types cannot be created, so it makes
// no sense to ask for their zero value.
panic(fmt.Sprintf("invalid type for a variable: %v", t))
default:
panic(t) // unreachable.
}
}
// IsZeroExpr uses simple syntactic heuristics to report whether expr
// is an obvious zero value, such as 0, "", nil, or false.
// It cannot do better without type information.
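// Illustrative example (not in the upstream source):
// IsZeroExpr(ast.NewIdent("nil")) and IsZeroExpr(&ast.BasicLit{Value: "0"})
// both report true, while composite or call expressions report false.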
func IsZeroExpr(expr ast.Expr) bool {
switch e := expr.(type) {
case *ast.BasicLit:
return e.Value == "0" || e.Value == `""`
case *ast.Ident:
return e.Name == "nil" || e.Name == "false"
default:
return false
}
}
// TypeExpr returns syntax for the specified type. References to named types
// from packages other than pkg are qualified by an appropriate package name, as
// defined by the import environment of file.
// It may panic for types such as Tuple or Union.
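// Illustrative example (not part of the upstream doc comment): for a slice of
// a named type T declared in another package imported as "other", TypeExpr
// produces syntax equivalent to []other.T, qualifying T with the package name
// or import alias used by f's imports.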
func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
switch t := typ.(type) {
case *types.Basic:
switch t.Kind() {
case types.UnsafePointer:
// TODO(hxjiang): replace the implementation with types.Qualifier.
return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")}
default:
return ast.NewIdent(t.Name())
}
case *types.Pointer:
return &ast.UnaryExpr{
Op: token.MUL,
X: TypeExpr(f, pkg, t.Elem()),
}
case *types.Array:
return &ast.ArrayType{
Len: &ast.BasicLit{
Kind: token.INT,
Value: fmt.Sprintf("%d", t.Len()),
},
Elt: TypeExpr(f, pkg, t.Elem()),
}
case *types.Slice:
return &ast.ArrayType{
Elt: TypeExpr(f, pkg, t.Elem()),
}
case *types.Map:
return &ast.MapType{
Key: TypeExpr(f, pkg, t.Key()),
Value: TypeExpr(f, pkg, t.Elem()),
}
case *types.Chan:
dir := ast.ChanDir(t.Dir())
if t.Dir() == types.SendRecv {
dir = ast.SEND | ast.RECV
}
return &ast.ChanType{
Dir: dir,
Value: TypeExpr(f, pkg, t.Elem()),
}
case *types.Signature:
var params []*ast.Field
for i := 0; i < t.Params().Len(); i++ {
params = append(params, &ast.Field{
Type: TypeExpr(f, pkg, t.Params().At(i).Type()),
Names: []*ast.Ident{
{
Name: t.Params().At(i).Name(),
},
},
})
}
if t.Variadic() {
last := params[len(params)-1]
last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt}
}
var returns []*ast.Field
for i := 0; i < t.Results().Len(); i++ {
returns = append(returns, &ast.Field{
Type: TypeExpr(f, pkg, t.Results().At(i).Type()),
})
}
return &ast.FuncType{
Params: &ast.FieldList{
List: params,
},
Results: &ast.FieldList{
List: returns,
},
}
case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam}
switch t.Obj().Pkg() {
case pkg, nil:
return ast.NewIdent(t.Obj().Name())
}
pkgName := t.Obj().Pkg().Name()
// TODO(hxjiang): replace the implementation with types.Qualifier.
// If the file already imports the package under another name, use that.
for _, cand := range f.Imports {
if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() {
if cand.Name != nil && cand.Name.Name != "" {
pkgName = cand.Name.Name
}
}
}
if pkgName == "." {
return ast.NewIdent(t.Obj().Name())
}
return &ast.SelectorExpr{
X: ast.NewIdent(pkgName),
Sel: ast.NewIdent(t.Obj().Name()),
}
case *types.Struct:
return ast.NewIdent(t.String())
case *types.Interface:
return ast.NewIdent(t.String())
case *types.Union:
// TODO(hxjiang): handle the union through syntax (~A | ... | ~Z).
// Remove nil check when calling typesinternal.TypeExpr.
return nil
case *types.Tuple:
panic("invalid input type types.Tuple")
default:
panic("unreachable")
}
}

View File

@@ -1,13 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package versions
import "go/build/constraint"
// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+).
// Otherwise nil.
//
// Deprecate once x/tools is after go1.21.
var ConstraintGoVersion func(x constraint.Expr) string

View File

@@ -1,14 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21
// +build go1.21
package versions
import "go/build/constraint"
func init() {
ConstraintGoVersion = constraint.GoVersion
}

View File

@@ -31,8 +31,3 @@ func FileVersion(info *types.Info, file *ast.File) string {
// This would act as a max version on what a tool can support.
return Future
}
// InitFileVersions initializes info to record Go versions for Go files.
func InitFileVersions(info *types.Info) {
info.FileVersions = make(map[*ast.File]string)
}

14
vendor/modules.txt vendored
View File

@@ -728,7 +728,7 @@ github.com/google/go-querystring/query
# github.com/google/go-tika v0.3.1
## explicit; go 1.11
github.com/google/go-tika/tika
# github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db
# github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad
## explicit; go 1.22
github.com/google/pprof/profile
# github.com/google/renameio/v2 v2.0.0
@@ -1077,7 +1077,7 @@ github.com/onsi/ginkgo/reporters/stenographer
github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
github.com/onsi/ginkgo/types
# github.com/onsi/ginkgo/v2 v2.22.0
# github.com/onsi/ginkgo/v2 v2.22.2
## explicit; go 1.22.0
github.com/onsi/ginkgo/v2
github.com/onsi/ginkgo/v2/config
@@ -1099,8 +1099,8 @@ github.com/onsi/ginkgo/v2/internal/parallel_support
github.com/onsi/ginkgo/v2/internal/testingtproxy
github.com/onsi/ginkgo/v2/reporters
github.com/onsi/ginkgo/v2/types
# github.com/onsi/gomega v1.36.0
## explicit; go 1.22
# github.com/onsi/gomega v1.36.2
## explicit; go 1.22.0
github.com/onsi/gomega
github.com/onsi/gomega/format
github.com/onsi/gomega/internal
@@ -1679,7 +1679,6 @@ github.com/rogpeppe/go-internal/internal/syscall/windows
github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll
github.com/rogpeppe/go-internal/lockedfile
github.com/rogpeppe/go-internal/lockedfile/internal/filelock
github.com/rogpeppe/go-internal/semver
# github.com/rs/cors v1.11.1
## explicit; go 1.13
github.com/rs/cors
@@ -2140,7 +2139,7 @@ golang.org/x/image/math/fixed
golang.org/x/image/tiff
golang.org/x/image/tiff/lzw
golang.org/x/image/vector
# golang.org/x/mod v0.21.0
# golang.org/x/mod v0.22.0
## explicit; go 1.22.0
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
@@ -2219,7 +2218,7 @@ golang.org/x/text/width
# golang.org/x/time v0.8.0
## explicit; go 1.18
golang.org/x/time/rate
# golang.org/x/tools v0.26.0
# golang.org/x/tools v0.28.0
## explicit; go 1.22.0
golang.org/x/tools/cover
golang.org/x/tools/go/ast/astutil
@@ -2238,6 +2237,7 @@ golang.org/x/tools/internal/gcimporter
golang.org/x/tools/internal/gocommand
golang.org/x/tools/internal/gopathwalk
golang.org/x/tools/internal/imports
golang.org/x/tools/internal/modindex
golang.org/x/tools/internal/packagesinternal
golang.org/x/tools/internal/pkgbits
golang.org/x/tools/internal/stdlib